1 /*
2 * Copyright © 2014-2015 Broadcom
3 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "util/ralloc.h"
26 #include "compiler/nir/nir.h"
27 #include "compiler/nir/nir_control_flow.h"
28 #include "compiler/nir/nir_builder.h"
29 #include "compiler/glsl/list.h"
30 #include "compiler/shader_enums.h"
31
32 #include "tgsi_to_nir.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "tgsi/tgsi_dump.h"
35 #include "tgsi/tgsi_info.h"
36 #include "tgsi/tgsi_scan.h"
37 #include "tgsi/tgsi_from_mesa.h"
38
39 #define SWIZ(X, Y, Z, W) (unsigned[4]){ \
40 TGSI_SWIZZLE_##X, \
41 TGSI_SWIZZLE_##Y, \
42 TGSI_SWIZZLE_##Z, \
43 TGSI_SWIZZLE_##W, \
44 }
45
46 struct ttn_reg_info {
47 /** nir register containing this TGSI index. */
48 nir_register *reg;
49 nir_variable *var;
50 /** Offset (in vec4s) from the start of var for this TGSI index. */
51 int offset;
52 };
53
54 struct ttn_compile {
55 union tgsi_full_token *token;
56 nir_builder build;
57 struct tgsi_shader_info *scan;
58
59 struct ttn_reg_info *output_regs;
60 struct ttn_reg_info *temp_regs;
61 nir_ssa_def **imm_defs;
62
63 unsigned num_samp_types;
64 nir_alu_type *samp_types;
65
66 nir_register *addr_reg;
67
68 /**
69 * Stack of nir_cursors where instructions should be pushed as we pop
70 * back out of the control flow stack.
71 *
72 * For each IF/ELSE/ENDIF block, if_stack[if_stack_pos] has where the else
73 * instructions should be placed, and if_stack[if_stack_pos - 1] has where
74 * the next instructions outside of the if/then/else block go.
75 */
76 nir_cursor *if_stack;
77 unsigned if_stack_pos;
78
79 /**
80 * Stack of nir_cursors where instructions should be pushed as we pop
81 * back out of the control flow stack.
82 *
83 * loop_stack[loop_stack_pos - 1] contains the cf_node_list for the outside
84 * of the loop.
85 */
86 nir_cursor *loop_stack;
87 unsigned loop_stack_pos;
88
89 /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
90 unsigned next_imm;
91 };
92
93 #define ttn_swizzle(b, src, x, y, z, w) \
94 nir_swizzle(b, src, SWIZ(x, y, z, w), 4, false)
95 #define ttn_channel(b, src, swiz) \
96 nir_swizzle(b, src, SWIZ(swiz, swiz, swiz, swiz), 1, false)
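/* Usage sketch: ttn_channel(b, src, X) yields a one-component value holding
 * src.x, and ttn_swizzle(b, src, W, Z, Y, X) would reverse the components;
 * both are thin wrappers around nir_swizzle() with TGSI_SWIZZLE_* indices.
 */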
97
98 static gl_varying_slot
99 tgsi_varying_semantic_to_slot(unsigned semantic, unsigned index)
100 {
101 switch (semantic) {
102 case TGSI_SEMANTIC_POSITION:
103 return VARYING_SLOT_POS;
104 case TGSI_SEMANTIC_COLOR:
105 if (index == 0)
106 return VARYING_SLOT_COL0;
107 else
108 return VARYING_SLOT_COL1;
109 case TGSI_SEMANTIC_BCOLOR:
110 if (index == 0)
111 return VARYING_SLOT_BFC0;
112 else
113 return VARYING_SLOT_BFC1;
114 case TGSI_SEMANTIC_FOG:
115 return VARYING_SLOT_FOGC;
116 case TGSI_SEMANTIC_PSIZE:
117 return VARYING_SLOT_PSIZ;
118 case TGSI_SEMANTIC_GENERIC:
119 return VARYING_SLOT_VAR0 + index;
120 case TGSI_SEMANTIC_FACE:
121 return VARYING_SLOT_FACE;
122 case TGSI_SEMANTIC_EDGEFLAG:
123 return VARYING_SLOT_EDGE;
124 case TGSI_SEMANTIC_PRIMID:
125 return VARYING_SLOT_PRIMITIVE_ID;
126 case TGSI_SEMANTIC_CLIPDIST:
127 if (index == 0)
128 return VARYING_SLOT_CLIP_DIST0;
129 else
130 return VARYING_SLOT_CLIP_DIST1;
131 case TGSI_SEMANTIC_CLIPVERTEX:
132 return VARYING_SLOT_CLIP_VERTEX;
133 case TGSI_SEMANTIC_TEXCOORD:
134 return VARYING_SLOT_TEX0 + index;
135 case TGSI_SEMANTIC_PCOORD:
136 return VARYING_SLOT_PNTC;
137 case TGSI_SEMANTIC_VIEWPORT_INDEX:
138 return VARYING_SLOT_VIEWPORT;
139 case TGSI_SEMANTIC_LAYER:
140 return VARYING_SLOT_LAYER;
141 default:
142 fprintf(stderr, "Bad TGSI semantic: %d/%d\n", semantic, index);
143 abort();
144 }
145 }
146
147 /* Temporary helper to remap back to TGSI style semantic name/index
148 * values, for use in drivers that haven't been converted to using
149 * VARYING_SLOT_
150 */
151 void
152 varying_slot_to_tgsi_semantic(gl_varying_slot slot,
153 unsigned *semantic_name, unsigned *semantic_index)
154 {
155 static const unsigned map[][2] = {
156 [VARYING_SLOT_POS] = { TGSI_SEMANTIC_POSITION, 0 },
157 [VARYING_SLOT_COL0] = { TGSI_SEMANTIC_COLOR, 0 },
158 [VARYING_SLOT_COL1] = { TGSI_SEMANTIC_COLOR, 1 },
159 [VARYING_SLOT_BFC0] = { TGSI_SEMANTIC_BCOLOR, 0 },
160 [VARYING_SLOT_BFC1] = { TGSI_SEMANTIC_BCOLOR, 1 },
161 [VARYING_SLOT_FOGC] = { TGSI_SEMANTIC_FOG, 0 },
162 [VARYING_SLOT_PSIZ] = { TGSI_SEMANTIC_PSIZE, 0 },
163 [VARYING_SLOT_FACE] = { TGSI_SEMANTIC_FACE, 0 },
164 [VARYING_SLOT_EDGE] = { TGSI_SEMANTIC_EDGEFLAG, 0 },
165 [VARYING_SLOT_PRIMITIVE_ID] = { TGSI_SEMANTIC_PRIMID, 0 },
166 [VARYING_SLOT_CLIP_DIST0] = { TGSI_SEMANTIC_CLIPDIST, 0 },
167 [VARYING_SLOT_CLIP_DIST1] = { TGSI_SEMANTIC_CLIPDIST, 1 },
168 [VARYING_SLOT_CLIP_VERTEX] = { TGSI_SEMANTIC_CLIPVERTEX, 0 },
169 [VARYING_SLOT_PNTC] = { TGSI_SEMANTIC_PCOORD, 0 },
170 [VARYING_SLOT_VIEWPORT] = { TGSI_SEMANTIC_VIEWPORT_INDEX, 0 },
171 [VARYING_SLOT_LAYER] = { TGSI_SEMANTIC_LAYER, 0 },
172 };
173
174 if (slot >= VARYING_SLOT_VAR0) {
175 *semantic_name = TGSI_SEMANTIC_GENERIC;
176 *semantic_index = slot - VARYING_SLOT_VAR0;
177 return;
178 }
179
180 if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7) {
181 *semantic_name = TGSI_SEMANTIC_TEXCOORD;
182 *semantic_index = slot - VARYING_SLOT_TEX0;
183 return;
184 }
185
186 if (slot >= ARRAY_SIZE(map)) {
187 fprintf(stderr, "Unknown varying slot %d\n", slot);
188 abort();
189 }
190
191 *semantic_name = map[slot][0];
192 *semantic_index = map[slot][1];
193 }
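/* For instance, this maps VARYING_SLOT_VAR7 back to (TGSI_SEMANTIC_GENERIC, 7)
 * and VARYING_SLOT_TEX3 back to (TGSI_SEMANTIC_TEXCOORD, 3); everything else
 * comes straight from the map[] table above.
 */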
194
195 /* Temporary helper to remap back to TGSI style semantic name/index
196 * values, for use in drivers that haven't been converted to using
197 * FRAG_RESULT_
198 */
199 void
200 frag_result_to_tgsi_semantic(gl_frag_result slot,
201 unsigned *semantic_name, unsigned *semantic_index)
202 {
203 static const unsigned map[][2] = {
204 [FRAG_RESULT_DEPTH] = { TGSI_SEMANTIC_POSITION, 0 },
205 [FRAG_RESULT_COLOR] = { TGSI_SEMANTIC_COLOR, -1 },
206 [FRAG_RESULT_DATA0 + 0] = { TGSI_SEMANTIC_COLOR, 0 },
207 [FRAG_RESULT_DATA0 + 1] = { TGSI_SEMANTIC_COLOR, 1 },
208 [FRAG_RESULT_DATA0 + 2] = { TGSI_SEMANTIC_COLOR, 2 },
209 [FRAG_RESULT_DATA0 + 3] = { TGSI_SEMANTIC_COLOR, 3 },
210 [FRAG_RESULT_DATA0 + 4] = { TGSI_SEMANTIC_COLOR, 4 },
211 [FRAG_RESULT_DATA0 + 5] = { TGSI_SEMANTIC_COLOR, 5 },
212 [FRAG_RESULT_DATA0 + 6] = { TGSI_SEMANTIC_COLOR, 6 },
213 [FRAG_RESULT_DATA0 + 7] = { TGSI_SEMANTIC_COLOR, 7 },
214 };
215
216 *semantic_name = map[slot][0];
217 *semantic_index = map[slot][1];
218 }
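/* For instance, FRAG_RESULT_DATA0 + 2 maps back to (TGSI_SEMANTIC_COLOR, 2),
 * while FRAG_RESULT_COLOR gets the special index -1 per the table above.
 */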
219
220 static nir_ssa_def *
221 ttn_src_for_dest(nir_builder *b, nir_alu_dest *dest)
222 {
223 nir_alu_src src;
224 memset(&src, 0, sizeof(src));
225
226 if (dest->dest.is_ssa)
227 src.src = nir_src_for_ssa(&dest->dest.ssa);
228 else {
229 assert(!dest->dest.reg.indirect);
230 src.src = nir_src_for_reg(dest->dest.reg.reg);
231 src.src.reg.base_offset = dest->dest.reg.base_offset;
232 }
233
234 for (int i = 0; i < 4; i++)
235 src.swizzle[i] = i;
236
237 return nir_fmov_alu(b, src, 4);
238 }
239
240 static void
241 ttn_emit_declaration(struct ttn_compile *c)
242 {
243 nir_builder *b = &c->build;
244 struct tgsi_full_declaration *decl = &c->token->FullDeclaration;
245 unsigned array_size = decl->Range.Last - decl->Range.First + 1;
246 unsigned file = decl->Declaration.File;
247 unsigned i;
248
249 if (file == TGSI_FILE_TEMPORARY) {
250 if (decl->Declaration.Array) {
251 /* for arrays, we create variables instead of registers: */
252 nir_variable *var = rzalloc(b->shader, nir_variable);
253
254 var->type = glsl_array_type(glsl_vec4_type(), array_size);
255 var->data.mode = nir_var_global;
256 var->name = ralloc_asprintf(var, "arr_%d", decl->Array.ArrayID);
257
258 exec_list_push_tail(&b->shader->globals, &var->node);
259
260 for (i = 0; i < array_size; i++) {
261 /* point all the matching slots to the same var,
262 * with appropriate offset set, mostly just so
263 * we know what to do when tgsi does a non-indirect
264 * access
265 */
266 c->temp_regs[decl->Range.First + i].reg = NULL;
267 c->temp_regs[decl->Range.First + i].var = var;
268 c->temp_regs[decl->Range.First + i].offset = i;
269 }
270 } else {
271 for (i = 0; i < array_size; i++) {
272 nir_register *reg = nir_local_reg_create(b->impl);
273 reg->num_components = 4;
274 c->temp_regs[decl->Range.First + i].reg = reg;
275 c->temp_regs[decl->Range.First + i].var = NULL;
276 c->temp_regs[decl->Range.First + i].offset = 0;
277 }
278 }
279 } else if (file == TGSI_FILE_ADDRESS) {
280 c->addr_reg = nir_local_reg_create(b->impl);
281 c->addr_reg->num_components = 4;
282 } else if (file == TGSI_FILE_SYSTEM_VALUE) {
283 /* Nothing to record for system values. */
284 } else if (file == TGSI_FILE_SAMPLER) {
285 /* Nothing to record for samplers. */
286 } else if (file == TGSI_FILE_SAMPLER_VIEW) {
287 struct tgsi_declaration_sampler_view *sview = &decl->SamplerView;
288 nir_alu_type type;
289
290 assert((sview->ReturnTypeX == sview->ReturnTypeY) &&
291 (sview->ReturnTypeX == sview->ReturnTypeZ) &&
292 (sview->ReturnTypeX == sview->ReturnTypeW));
293
294 switch (sview->ReturnTypeX) {
295 case TGSI_RETURN_TYPE_SINT:
296 type = nir_type_int;
297 break;
298 case TGSI_RETURN_TYPE_UINT:
299 type = nir_type_uint;
300 break;
301 case TGSI_RETURN_TYPE_FLOAT:
302 default:
303 type = nir_type_float;
304 break;
305 }
306
307 for (i = 0; i < array_size; i++) {
308 c->samp_types[decl->Range.First + i] = type;
309 }
310 } else {
311 bool is_array = (array_size > 1);
312
313 assert(file == TGSI_FILE_INPUT ||
314 file == TGSI_FILE_OUTPUT ||
315 file == TGSI_FILE_CONSTANT);
316
317 /* nothing to do for UBOs: */
318 if ((file == TGSI_FILE_CONSTANT) && decl->Declaration.Dimension &&
319 decl->Dim.Index2D != 0) {
320 b->shader->info.num_ubos =
321 MAX2(b->shader->info.num_ubos, decl->Dim.Index2D);
322 return;
323 }
324
325 if ((file == TGSI_FILE_INPUT) || (file == TGSI_FILE_OUTPUT)) {
326 is_array = (is_array && decl->Declaration.Array &&
327 (decl->Array.ArrayID != 0));
328 }
329
330 for (i = 0; i < array_size; i++) {
331 unsigned idx = decl->Range.First + i;
332 nir_variable *var = rzalloc(b->shader, nir_variable);
333
334 var->data.driver_location = idx;
335
336 var->type = glsl_vec4_type();
337 if (is_array)
338 var->type = glsl_array_type(var->type, array_size);
339
340 switch (file) {
341 case TGSI_FILE_INPUT:
342 var->data.read_only = true;
343 var->data.mode = nir_var_shader_in;
344 var->name = ralloc_asprintf(var, "in_%d", idx);
345
346 if (c->scan->processor == PIPE_SHADER_FRAGMENT) {
347 if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
348 var->data.location = SYSTEM_VALUE_FRONT_FACE;
349 var->data.mode = nir_var_system_value;
350 } else {
351 var->data.location =
352 tgsi_varying_semantic_to_slot(decl->Semantic.Name,
353 decl->Semantic.Index);
354 }
355 } else {
356 assert(!decl->Declaration.Semantic);
357 var->data.location = VERT_ATTRIB_GENERIC0 + idx;
358 }
359 var->data.index = 0;
360
361 /* We definitely need to translate the interpolation field, because
362 * nir_print will decode it.
363 */
364 switch (decl->Interp.Interpolate) {
365 case TGSI_INTERPOLATE_CONSTANT:
366 var->data.interpolation = INTERP_MODE_FLAT;
367 break;
368 case TGSI_INTERPOLATE_LINEAR:
369 var->data.interpolation = INTERP_MODE_NOPERSPECTIVE;
370 break;
371 case TGSI_INTERPOLATE_PERSPECTIVE:
372 var->data.interpolation = INTERP_MODE_SMOOTH;
373 break;
374 }
375
376 exec_list_push_tail(&b->shader->inputs, &var->node);
377
378 for (int i = 0; i < array_size; i++)
380 b->shader->info.inputs_read |= 1ull << (var->data.location + i);
380
381 break;
382 case TGSI_FILE_OUTPUT: {
383 int semantic_name = decl->Semantic.Name;
384 int semantic_index = decl->Semantic.Index;
385 /* Since we can't load from outputs in the IR, we make temporaries
386 * for the outputs and emit stores to the real outputs at the end of
387 * the shader.
388 */
389 nir_register *reg = nir_local_reg_create(b->impl);
390 reg->num_components = 4;
391 if (is_array)
392 reg->num_array_elems = array_size;
393
394 var->data.mode = nir_var_shader_out;
395 var->name = ralloc_asprintf(var, "out_%d", idx);
396 var->data.index = 0;
397
398 if (c->scan->processor == PIPE_SHADER_FRAGMENT) {
399 switch (semantic_name) {
400 case TGSI_SEMANTIC_COLOR: {
401 /* TODO: tgsi loses some information, so at this point we cannot
402 * actually differentiate between dual-source blending (DSB) and MRT.
403 * But so far no drivers using tgsi-to-nir support dual source
404 * blend:
405 */
406 bool dual_src_blend = false;
407 if (dual_src_blend && (semantic_index == 1)) {
408 var->data.location = FRAG_RESULT_DATA0;
409 var->data.index = 1;
410 } else {
411 if (c->scan->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
412 var->data.location = FRAG_RESULT_COLOR;
413 else
414 var->data.location = FRAG_RESULT_DATA0 + semantic_index;
415 }
416 break;
417 }
418 case TGSI_SEMANTIC_POSITION:
419 var->data.location = FRAG_RESULT_DEPTH;
420 break;
421 default:
422 fprintf(stderr, "Bad TGSI semantic: %d/%d\n",
423 decl->Semantic.Name, decl->Semantic.Index);
424 abort();
425 }
426 } else {
427 var->data.location =
428 tgsi_varying_semantic_to_slot(semantic_name, semantic_index);
429 }
430
431 if (is_array) {
432 unsigned j;
433 for (j = 0; j < array_size; j++) {
434 c->output_regs[idx + j].offset = i + j;
435 c->output_regs[idx + j].reg = reg;
436 }
437 } else {
438 c->output_regs[idx].offset = i;
439 c->output_regs[idx].reg = reg;
440 }
441
442 exec_list_push_tail(&b->shader->outputs, &var->node);
443
444 for (int i = 0; i < array_size; i++)
445 b->shader->info.outputs_written |= 1ull << (var->data.location + i);
446 }
447 break;
448 case TGSI_FILE_CONSTANT:
449 var->data.mode = nir_var_uniform;
450 var->name = ralloc_asprintf(var, "uniform_%d", idx);
451
452 exec_list_push_tail(&b->shader->uniforms, &var->node);
453 break;
454 default:
455 unreachable("bad declaration file");
456 return;
457 }
458
459 if (is_array)
460 break;
461 }
462
463 }
464 }
465
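/* Each TGSI immediate declaration becomes one 4-component load_const.  E.g. a
 * hypothetical "IMM[0] FLT32 {0.5, 1.0, 0.0, 0.0}" ends up in c->imm_defs[0]
 * (stored as raw 32-bit values) and is later referenced by
 * TGSI_FILE_IMMEDIATE sources.
 */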
466 static void
467 ttn_emit_immediate(struct ttn_compile *c)
468 {
469 nir_builder *b = &c->build;
470 struct tgsi_full_immediate *tgsi_imm = &c->token->FullImmediate;
471 nir_load_const_instr *load_const;
472 int i;
473
474 load_const = nir_load_const_instr_create(b->shader, 4, 32);
475 c->imm_defs[c->next_imm] = &load_const->def;
476 c->next_imm++;
477
478 for (i = 0; i < 4; i++)
479 load_const->value.u32[i] = tgsi_imm->u[i].Uint;
480
481 nir_builder_instr_insert(b, &load_const->instr);
482 }
483
484 static nir_ssa_def *
485 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect);
486
487 /* generate either a constant or indirect deref chain for accessing an
488 * array variable.
489 */
490 static nir_deref_instr *
491 ttn_array_deref(struct ttn_compile *c, nir_variable *var, unsigned offset,
492 struct tgsi_ind_register *indirect)
493 {
494 nir_deref_instr *deref = nir_build_deref_var(&c->build, var);
495 nir_ssa_def *index = nir_imm_int(&c->build, offset);
496 if (indirect)
497 index = nir_iadd(&c->build, index, ttn_src_for_indirect(c, indirect));
498 return nir_build_deref_array(&c->build, deref, index);
499 }
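/* E.g. with a base offset of 2 and an indirect ADDR.x this builds the
 * equivalent of arr[2 + ADDR.x]; with no indirect it is just a constant
 * arr[2] deref.
 */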
500
501 static nir_src
502 ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
503 struct tgsi_ind_register *indirect,
504 struct tgsi_dimension *dim,
505 struct tgsi_ind_register *dimind)
506 {
507 nir_builder *b = &c->build;
508 nir_src src;
509
510 memset(&src, 0, sizeof(src));
511
512 switch (file) {
513 case TGSI_FILE_TEMPORARY:
514 if (c->temp_regs[index].var) {
515 unsigned offset = c->temp_regs[index].offset;
516 nir_variable *var = c->temp_regs[index].var;
517 nir_ssa_def *load = nir_load_deref(&c->build,
518 ttn_array_deref(c, var, offset, indirect));
519
520 src = nir_src_for_ssa(load);
521 } else {
522 assert(!indirect);
523 src.reg.reg = c->temp_regs[index].reg;
524 }
525 assert(!dim);
526 break;
527
528 case TGSI_FILE_ADDRESS:
529 src.reg.reg = c->addr_reg;
530 assert(!dim);
531 break;
532
533 case TGSI_FILE_IMMEDIATE:
534 src = nir_src_for_ssa(c->imm_defs[index]);
535 assert(!indirect);
536 assert(!dim);
537 break;
538
539 case TGSI_FILE_SYSTEM_VALUE: {
540 nir_intrinsic_instr *load;
541 nir_intrinsic_op op;
542 unsigned ncomp = 1;
543
544 assert(!indirect);
545 assert(!dim);
546
547 switch (c->scan->system_value_semantic_name[index]) {
548 case TGSI_SEMANTIC_VERTEXID_NOBASE:
549 op = nir_intrinsic_load_vertex_id_zero_base;
550 break;
551 case TGSI_SEMANTIC_VERTEXID:
552 op = nir_intrinsic_load_vertex_id;
553 break;
554 case TGSI_SEMANTIC_BASEVERTEX:
555 op = nir_intrinsic_load_base_vertex;
556 break;
557 case TGSI_SEMANTIC_INSTANCEID:
558 op = nir_intrinsic_load_instance_id;
559 break;
560 default:
561 unreachable("bad system value");
562 }
563
564 load = nir_intrinsic_instr_create(b->shader, op);
565 load->num_components = ncomp;
566
567 nir_ssa_dest_init(&load->instr, &load->dest, ncomp, 32, NULL);
568 nir_builder_instr_insert(b, &load->instr);
569
570 src = nir_src_for_ssa(&load->dest.ssa);
571
572 b->shader->info.system_values_read |=
573 (1 << nir_system_value_from_intrinsic(op));
574
575 break;
576 }
577
578 case TGSI_FILE_INPUT:
579 case TGSI_FILE_CONSTANT: {
580 nir_intrinsic_instr *load;
581 nir_intrinsic_op op;
582 unsigned srcn = 0;
583
584 switch (file) {
585 case TGSI_FILE_INPUT:
586 /* Special case: turn the frontface varying into a load of the front_face
587 * intrinsic plus math producing the vec4 TGSI expects (+/-1.0, 0.0, 0.0, 1.0).
588 */
589 if (c->scan->processor == PIPE_SHADER_FRAGMENT &&
590 c->scan->input_semantic_name[index] == TGSI_SEMANTIC_FACE) {
591 nir_ssa_def *tgsi_frontface[4] = {
592 nir_bcsel(&c->build,
593 nir_load_system_value(&c->build,
594 nir_intrinsic_load_front_face, 0),
595 nir_imm_float(&c->build, 1.0),
596 nir_imm_float(&c->build, -1.0)),
597 nir_imm_float(&c->build, 0.0),
598 nir_imm_float(&c->build, 0.0),
599 nir_imm_float(&c->build, 1.0),
600 };
601
602 return nir_src_for_ssa(nir_vec(&c->build, tgsi_frontface, 4));
603 }
604
605 op = nir_intrinsic_load_input;
606 assert(!dim);
607 break;
608 case TGSI_FILE_CONSTANT:
609 if (dim && (dim->Index > 0 || dim->Indirect)) {
610 op = nir_intrinsic_load_ubo;
611 } else {
612 op = nir_intrinsic_load_uniform;
613 }
614 break;
615 default:
616 unreachable("No other load files supported");
617 break;
618 }
619
620 load = nir_intrinsic_instr_create(b->shader, op);
621
622 load->num_components = 4;
623 if (dim && (dim->Index > 0 || dim->Indirect)) {
624 if (dimind) {
625 load->src[srcn] =
626 ttn_src_for_file_and_index(c, dimind->File, dimind->Index,
627 NULL, NULL, NULL);
628 } else {
629 /* UBOs start at index 1 in TGSI: */
630 load->src[srcn] =
631 nir_src_for_ssa(nir_imm_int(b, dim->Index - 1));
632 }
633 srcn++;
634 }
635
636 nir_ssa_def *offset;
637 if (op == nir_intrinsic_load_ubo) {
638 /* UBO loads don't have a base offset. */
639 offset = nir_imm_int(b, index);
640 if (indirect) {
641 offset = nir_iadd(b, offset, ttn_src_for_indirect(c, indirect));
642 }
643 /* UBO offsets are in bytes, but TGSI gives them to us in vec4's */
644 offset = nir_ishl(b, offset, nir_imm_int(b, 4));
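/* e.g. vec4 slot 5 becomes byte offset 5 << 4 == 80 */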
645 } else {
646 nir_intrinsic_set_base(load, index);
647 if (indirect) {
648 offset = ttn_src_for_indirect(c, indirect);
649 } else {
650 offset = nir_imm_int(b, 0);
651 }
652 }
653 load->src[srcn++] = nir_src_for_ssa(offset);
654
655 nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
656 nir_builder_instr_insert(b, &load->instr);
657
658 src = nir_src_for_ssa(&load->dest.ssa);
659 break;
660 }
661
662 default:
663 unreachable("bad src file");
664 }
665
666
667 return src;
668 }
669
670 static nir_ssa_def *
671 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect)
672 {
673 nir_builder *b = &c->build;
674 nir_alu_src src;
675 memset(&src, 0, sizeof(src));
676 for (int i = 0; i < 4; i++)
677 src.swizzle[i] = indirect->Swizzle;
678 src.src = ttn_src_for_file_and_index(c,
679 indirect->File,
680 indirect->Index,
681 NULL, NULL, NULL);
682 return nir_imov_alu(b, src, 1);
683 }
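/* The indirect (typically ADDR) register contributes a single component,
 * selected by its swizzle, which callers add onto a base index or offset.
 */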
684
685 static nir_alu_dest
686 ttn_get_dest(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
687 {
688 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
689 nir_alu_dest dest;
690 unsigned index = tgsi_dst->Index;
691
692 memset(&dest, 0, sizeof(dest));
693
694 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
695 if (c->temp_regs[index].var) {
696 nir_register *reg;
697
698 /* this works, because TGSI will give us a base offset
699 * (in case of indirect index) that points back into
700 * the array. Access can be direct or indirect, we
701 * don't really care. Just create a one-shot dst reg
702 * that will get store_var'd back into the array var
703 * at the end of ttn_emit_instruction()
704 */
705 reg = nir_local_reg_create(c->build.impl);
706 reg->num_components = 4;
707 dest.dest.reg.reg = reg;
708 dest.dest.reg.base_offset = 0;
709 } else {
710 assert(!tgsi_dst->Indirect);
711 dest.dest.reg.reg = c->temp_regs[index].reg;
712 dest.dest.reg.base_offset = c->temp_regs[index].offset;
713 }
714 } else if (tgsi_dst->File == TGSI_FILE_OUTPUT) {
715 dest.dest.reg.reg = c->output_regs[index].reg;
716 dest.dest.reg.base_offset = c->output_regs[index].offset;
717 } else if (tgsi_dst->File == TGSI_FILE_ADDRESS) {
718 assert(index == 0);
719 dest.dest.reg.reg = c->addr_reg;
720 }
721
722 dest.write_mask = tgsi_dst->WriteMask;
723 dest.saturate = false;
724
725 if (tgsi_dst->Indirect && (tgsi_dst->File != TGSI_FILE_TEMPORARY)) {
726 nir_src *indirect = ralloc(c->build.shader, nir_src);
727 *indirect = nir_src_for_ssa(ttn_src_for_indirect(c, &tgsi_fdst->Indirect));
728 dest.dest.reg.indirect = indirect;
729 }
730
731 return dest;
732 }
733
734 static nir_variable *
735 ttn_get_var(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
736 {
737 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
738 unsigned index = tgsi_dst->Index;
739
740 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
741 /* we should not have an indirect when there is no var! */
742 if (!c->temp_regs[index].var)
743 assert(!tgsi_dst->Indirect);
744 return c->temp_regs[index].var;
745 }
746
747 return NULL;
748 }
749
750 static nir_ssa_def *
751 ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc,
752 int src_idx)
753 {
754 nir_builder *b = &c->build;
755 struct tgsi_src_register *tgsi_src = &tgsi_fsrc->Register;
756 enum tgsi_opcode opcode = c->token->FullInstruction.Instruction.Opcode;
757 unsigned tgsi_src_type = tgsi_opcode_infer_src_type(opcode, src_idx);
758 bool src_is_float = !(tgsi_src_type == TGSI_TYPE_SIGNED ||
759 tgsi_src_type == TGSI_TYPE_UNSIGNED);
760 nir_alu_src src;
761
762 memset(&src, 0, sizeof(src));
763
764 if (tgsi_src->File == TGSI_FILE_NULL) {
765 return nir_imm_float(b, 0.0);
766 } else if (tgsi_src->File == TGSI_FILE_SAMPLER) {
767 /* Only the index of the sampler gets used in texturing, and it will
768 * handle looking that up on its own instead of using the nir_alu_src.
769 */
770 assert(!tgsi_src->Indirect);
771 return NULL;
772 } else {
773 struct tgsi_ind_register *ind = NULL;
774 struct tgsi_dimension *dim = NULL;
775 struct tgsi_ind_register *dimind = NULL;
776 if (tgsi_src->Indirect)
777 ind = &tgsi_fsrc->Indirect;
778 if (tgsi_src->Dimension) {
779 dim = &tgsi_fsrc->Dimension;
780 if (dim->Indirect)
781 dimind = &tgsi_fsrc->DimIndirect;
782 }
783 src.src = ttn_src_for_file_and_index(c,
784 tgsi_src->File,
785 tgsi_src->Index,
786 ind, dim, dimind);
787 }
788
789 src.swizzle[0] = tgsi_src->SwizzleX;
790 src.swizzle[1] = tgsi_src->SwizzleY;
791 src.swizzle[2] = tgsi_src->SwizzleZ;
792 src.swizzle[3] = tgsi_src->SwizzleW;
793
794 nir_ssa_def *def = nir_fmov_alu(b, src, 4);
795
796 if (tgsi_src->Absolute) {
797 if (src_is_float)
798 def = nir_fabs(b, def);
799 else
800 def = nir_iabs(b, def);
801 }
802
803 if (tgsi_src->Negate) {
804 if (src_is_float)
805 def = nir_fneg(b, def);
806 else
807 def = nir_ineg(b, def);
808 }
809
810 return def;
811 }
812
813 static void
814 ttn_alu(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
815 {
816 unsigned num_srcs = nir_op_infos[op].num_inputs;
817 nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
818 unsigned i;
819
820 for (i = 0; i < num_srcs; i++)
821 instr->src[i].src = nir_src_for_ssa(src[i]);
822
823 instr->dest = dest;
824 nir_builder_instr_insert(b, &instr->instr);
825 }
826
827 static void
828 ttn_move_dest_masked(nir_builder *b, nir_alu_dest dest,
829 nir_ssa_def *def, unsigned write_mask)
830 {
831 if (!(dest.write_mask & write_mask))
832 return;
833
834 nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_imov);
835 mov->dest = dest;
836 mov->dest.write_mask &= write_mask;
837 mov->src[0].src = nir_src_for_ssa(def);
838 for (unsigned i = def->num_components; i < 4; i++)
839 mov->src[0].swizzle[i] = def->num_components - 1;
840 nir_builder_instr_insert(b, &mov->instr);
841 }
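/* As an illustration, ttn_move_dest_masked(b, dest, def, TGSI_WRITEMASK_XY)
 * writes def into dest.xy only (and only where dest's own writemask allows);
 * missing source components are padded by repeating def's last component.
 */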
842
843 static void
844 ttn_move_dest(nir_builder *b, nir_alu_dest dest, nir_ssa_def *def)
845 {
846 ttn_move_dest_masked(b, dest, def, TGSI_WRITEMASK_XYZW);
847 }
848
849 static void
850 ttn_arl(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
851 {
852 ttn_move_dest(b, dest, nir_f2i32(b, nir_ffloor(b, src[0])));
853 }
854
855 /* EXP - Approximate Exponential Base 2
856 * dst.x = 2^{\lfloor src.x\rfloor}
857 * dst.y = src.x - \lfloor src.x\rfloor
858 * dst.z = 2^{src.x}
859 * dst.w = 1.0
860 */
861 static void
862 ttn_exp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
863 {
864 nir_ssa_def *srcx = ttn_channel(b, src[0], X);
865
866 ttn_move_dest_masked(b, dest, nir_fexp2(b, nir_ffloor(b, srcx)),
867 TGSI_WRITEMASK_X);
868 ttn_move_dest_masked(b, dest, nir_fsub(b, srcx, nir_ffloor(b, srcx)),
869 TGSI_WRITEMASK_Y);
870 ttn_move_dest_masked(b, dest, nir_fexp2(b, srcx), TGSI_WRITEMASK_Z);
871 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
872 }
873
874 /* LOG - Approximate Logarithm Base 2
875 * dst.x = \lfloor\log_2{|src.x|}\rfloor
876 * dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
877 * dst.z = \log_2{|src.x|}
878 * dst.w = 1.0
879 */
880 static void
881 ttn_log(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
882 {
883 nir_ssa_def *abs_srcx = nir_fabs(b, ttn_channel(b, src[0], X));
884 nir_ssa_def *log2 = nir_flog2(b, abs_srcx);
885
886 ttn_move_dest_masked(b, dest, nir_ffloor(b, log2), TGSI_WRITEMASK_X);
887 ttn_move_dest_masked(b, dest,
888 nir_fdiv(b, abs_srcx, nir_fexp2(b, nir_ffloor(b, log2))),
889 TGSI_WRITEMASK_Y);
890 ttn_move_dest_masked(b, dest, nir_flog2(b, abs_srcx), TGSI_WRITEMASK_Z);
891 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
892 }
893
894 /* DST - Distance Vector
895 * dst.x = 1.0
896 * dst.y = src0.y \times src1.y
897 * dst.z = src0.z
898 * dst.w = src1.w
899 */
900 static void
901 ttn_dst(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
902 {
903 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_X);
904 ttn_move_dest_masked(b, dest, nir_fmul(b, src[0], src[1]), TGSI_WRITEMASK_Y);
905 ttn_move_dest_masked(b, dest, nir_fmov(b, src[0]), TGSI_WRITEMASK_Z);
906 ttn_move_dest_masked(b, dest, nir_fmov(b, src[1]), TGSI_WRITEMASK_W);
907 }
908
909 /* LIT - Light Coefficients
910 * dst.x = 1.0
911 * dst.y = max(src.x, 0.0)
912 * dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0)} : 0
913 * dst.w = 1.0
914 */
915 static void
916 ttn_lit(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
917 {
918 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_XW);
919
920 ttn_move_dest_masked(b, dest, nir_fmax(b, ttn_channel(b, src[0], X),
921 nir_imm_float(b, 0.0)), TGSI_WRITEMASK_Y);
922
923 if (dest.write_mask & TGSI_WRITEMASK_Z) {
924 nir_ssa_def *src0_y = ttn_channel(b, src[0], Y);
925 nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, ttn_channel(b, src[0], W),
926 nir_imm_float(b, 128.0)),
927 nir_imm_float(b, -128.0));
928 nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
929 wclamp);
930
931 ttn_move_dest_masked(b, dest,
932 nir_bcsel(b,
933 nir_fge(b,
934 nir_imm_float(b, 0.0),
935 ttn_channel(b, src[0], X)),
936 nir_imm_float(b, 0.0),
937 pow),
938 TGSI_WRITEMASK_Z);
939 }
940 }
941
942 static void
943 ttn_sle(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
944 {
945 ttn_move_dest(b, dest, nir_sge(b, src[1], src[0]));
946 }
947
948 static void
949 ttn_sgt(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
950 {
951 ttn_move_dest(b, dest, nir_slt(b, src[1], src[0]));
952 }
953
954 static void
955 ttn_dp2(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
956 {
957 ttn_move_dest(b, dest, nir_fdot2(b, src[0], src[1]));
958 }
959
960 static void
961 ttn_dp3(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
962 {
963 ttn_move_dest(b, dest, nir_fdot3(b, src[0], src[1]));
964 }
965
966 static void
967 ttn_dp4(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
968 {
969 ttn_move_dest(b, dest, nir_fdot4(b, src[0], src[1]));
970 }
971
972 static void
973 ttn_umad(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
974 {
975 ttn_move_dest(b, dest, nir_iadd(b, nir_imul(b, src[0], src[1]), src[2]));
976 }
977
978 static void
979 ttn_arr(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
980 {
981 ttn_move_dest(b, dest, nir_ffloor(b, nir_fadd(b, src[0], nir_imm_float(b, 0.5))));
982 }
983
984 static void
985 ttn_cmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
986 {
987 ttn_move_dest(b, dest, nir_bcsel(b,
988 nir_flt(b, src[0], nir_imm_float(b, 0.0)),
989 src[1], src[2]));
990 }
991
992 static void
993 ttn_ucmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
994 {
995 ttn_move_dest(b, dest, nir_bcsel(b,
996 nir_ine(b, src[0], nir_imm_int(b, 0)),
997 src[1], src[2]));
998 }
999
1000 static void
1001 ttn_kill(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1002 {
1003 nir_intrinsic_instr *discard =
1004 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
1005 nir_builder_instr_insert(b, &discard->instr);
1006 b->shader->info.fs.uses_discard = true;
1007 }
1008
1009 static void
1010 ttn_kill_if(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1011 {
1012 nir_ssa_def *cmp = nir_bany_inequal4(b, nir_flt(b, src[0],
1013 nir_imm_float(b, 0.0)),
1014 nir_imm_int(b, 0));
1015 nir_intrinsic_instr *discard =
1016 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard_if);
1017 discard->src[0] = nir_src_for_ssa(cmp);
1018 nir_builder_instr_insert(b, &discard->instr);
1019 b->shader->info.fs.uses_discard = true;
1020 }
1021
1022 static void
1023 ttn_if(struct ttn_compile *c, nir_ssa_def *src, bool is_uint)
1024 {
1025 nir_builder *b = &c->build;
1026
1027 src = ttn_channel(b, src, X);
1028
1029 nir_if *if_stmt = nir_if_create(b->shader);
1030 if (is_uint) {
1031 if_stmt->condition = nir_src_for_ssa(nir_ine(b, src, nir_imm_int(b, 0)));
1032 } else {
1033 if_stmt->condition = nir_src_for_ssa(nir_fne(b, src, nir_imm_float(b, 0.0)));
1034 }
1035 nir_builder_cf_insert(b, &if_stmt->cf_node);
1036
1037 c->if_stack[c->if_stack_pos] = nir_after_cf_node(&if_stmt->cf_node);
1038 c->if_stack_pos++;
1039
1040 b->cursor = nir_after_cf_list(&if_stmt->then_list);
1041
1042 c->if_stack[c->if_stack_pos] = nir_after_cf_list(&if_stmt->else_list);
1043 c->if_stack_pos++;
1044 }
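/* After ttn_if() the builder cursor sits inside the then-list; the top
 * if_stack entry points at the else-list (used by ttn_else()) and the entry
 * below it at the point just after the whole if, restored by ttn_endif().
 */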
1045
1046 static void
1047 ttn_else(struct ttn_compile *c)
1048 {
1049 nir_builder *b = &c->build;
1050
1051 b->cursor = c->if_stack[c->if_stack_pos - 1];
1052 }
1053
1054 static void
1055 ttn_endif(struct ttn_compile *c)
1056 {
1057 nir_builder *b = &c->build;
1058
1059 c->if_stack_pos -= 2;
1060 b->cursor = c->if_stack[c->if_stack_pos];
1061 }
1062
1063 static void
1064 ttn_bgnloop(struct ttn_compile *c)
1065 {
1066 nir_builder *b = &c->build;
1067
1068 nir_loop *loop = nir_loop_create(b->shader);
1069 nir_builder_cf_insert(b, &loop->cf_node);
1070
1071 c->loop_stack[c->loop_stack_pos] = nir_after_cf_node(&loop->cf_node);
1072 c->loop_stack_pos++;
1073
1074 b->cursor = nir_after_cf_list(&loop->body);
1075 }
1076
1077 static void
1078 ttn_cont(nir_builder *b)
1079 {
1080 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_continue);
1081 nir_builder_instr_insert(b, &instr->instr);
1082 }
1083
1084 static void
1085 ttn_brk(nir_builder *b)
1086 {
1087 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
1088 nir_builder_instr_insert(b, &instr->instr);
1089 }
1090
1091 static void
1092 ttn_endloop(struct ttn_compile *c)
1093 {
1094 nir_builder *b = &c->build;
1095
1096 c->loop_stack_pos--;
1097 b->cursor = c->loop_stack[c->loop_stack_pos];
1098 }
1099
1100 static void
1101 setup_texture_info(nir_tex_instr *instr, unsigned texture)
1102 {
1103 switch (texture) {
1104 case TGSI_TEXTURE_BUFFER:
1105 instr->sampler_dim = GLSL_SAMPLER_DIM_BUF;
1106 break;
1107 case TGSI_TEXTURE_1D:
1108 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1109 break;
1110 case TGSI_TEXTURE_1D_ARRAY:
1111 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1112 instr->is_array = true;
1113 break;
1114 case TGSI_TEXTURE_SHADOW1D:
1115 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1116 instr->is_shadow = true;
1117 break;
1118 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1119 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1120 instr->is_shadow = true;
1121 instr->is_array = true;
1122 break;
1123 case TGSI_TEXTURE_2D:
1124 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1125 break;
1126 case TGSI_TEXTURE_2D_ARRAY:
1127 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1128 instr->is_array = true;
1129 break;
1130 case TGSI_TEXTURE_2D_MSAA:
1131 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1132 break;
1133 case TGSI_TEXTURE_2D_ARRAY_MSAA:
1134 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1135 instr->is_array = true;
1136 break;
1137 case TGSI_TEXTURE_SHADOW2D:
1138 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1139 instr->is_shadow = true;
1140 break;
1141 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1142 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1143 instr->is_shadow = true;
1144 instr->is_array = true;
1145 break;
1146 case TGSI_TEXTURE_3D:
1147 instr->sampler_dim = GLSL_SAMPLER_DIM_3D;
1148 break;
1149 case TGSI_TEXTURE_CUBE:
1150 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1151 break;
1152 case TGSI_TEXTURE_CUBE_ARRAY:
1153 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1154 instr->is_array = true;
1155 break;
1156 case TGSI_TEXTURE_SHADOWCUBE:
1157 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1158 instr->is_shadow = true;
1159 break;
1160 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
1161 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1162 instr->is_shadow = true;
1163 instr->is_array = true;
1164 break;
1165 case TGSI_TEXTURE_RECT:
1166 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1167 break;
1168 case TGSI_TEXTURE_SHADOWRECT:
1169 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1170 instr->is_shadow = true;
1171 break;
1172 default:
1173 fprintf(stderr, "Unknown TGSI texture target %d\n", texture);
1174 abort();
1175 }
1176 }
1177
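/* Translate the TGSI texturing opcodes into a single nir_tex_instr.  "samp"
 * below is the TGSI source slot holding the sampler; e.g. for TXB2 the
 * sampler is SRC[2] and the bias is carried in SRC[1].x.
 */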
1178 static void
1179 ttn_tex(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1180 {
1181 nir_builder *b = &c->build;
1182 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1183 nir_tex_instr *instr;
1184 nir_texop op;
1185 unsigned num_srcs, samp = 1, sview, i;
1186
1187 switch (tgsi_inst->Instruction.Opcode) {
1188 case TGSI_OPCODE_TEX:
1189 op = nir_texop_tex;
1190 num_srcs = 1;
1191 break;
1192 case TGSI_OPCODE_TEX2:
1193 op = nir_texop_tex;
1194 num_srcs = 1;
1195 samp = 2;
1196 break;
1197 case TGSI_OPCODE_TXP:
1198 op = nir_texop_tex;
1199 num_srcs = 2;
1200 break;
1201 case TGSI_OPCODE_TXB:
1202 op = nir_texop_txb;
1203 num_srcs = 2;
1204 break;
1205 case TGSI_OPCODE_TXB2:
1206 op = nir_texop_txb;
1207 num_srcs = 2;
1208 samp = 2;
1209 break;
1210 case TGSI_OPCODE_TXL:
1211 op = nir_texop_txl;
1212 num_srcs = 2;
1213 break;
1214 case TGSI_OPCODE_TXL2:
1215 op = nir_texop_txl;
1216 num_srcs = 2;
1217 samp = 2;
1218 break;
1219 case TGSI_OPCODE_TXF:
1220 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
1221 tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA) {
1222 op = nir_texop_txf_ms;
1223 } else {
1224 op = nir_texop_txf;
1225 }
1226 num_srcs = 2;
1227 break;
1228 case TGSI_OPCODE_TXD:
1229 op = nir_texop_txd;
1230 num_srcs = 3;
1231 samp = 3;
1232 break;
1233 case TGSI_OPCODE_LODQ:
1234 op = nir_texop_lod;
1235 num_srcs = 1;
1236 break;
1237
1238 default:
1239 fprintf(stderr, "unknown TGSI tex op %d\n", tgsi_inst->Instruction.Opcode);
1240 abort();
1241 }
1242
1243 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
1244 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
1245 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
1246 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
1247 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
1248 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
1249 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
1250 num_srcs++;
1251 }
1252
1253 num_srcs += tgsi_inst->Texture.NumOffsets;
1254
1255 instr = nir_tex_instr_create(b->shader, num_srcs);
1256 instr->op = op;
1257
1258 setup_texture_info(instr, tgsi_inst->Texture.Texture);
1259
1260 switch (instr->sampler_dim) {
1261 case GLSL_SAMPLER_DIM_1D:
1262 case GLSL_SAMPLER_DIM_BUF:
1263 instr->coord_components = 1;
1264 break;
1265 case GLSL_SAMPLER_DIM_2D:
1266 case GLSL_SAMPLER_DIM_RECT:
1267 case GLSL_SAMPLER_DIM_EXTERNAL:
1268 case GLSL_SAMPLER_DIM_MS:
1269 instr->coord_components = 2;
1270 break;
1271 case GLSL_SAMPLER_DIM_3D:
1272 case GLSL_SAMPLER_DIM_CUBE:
1273 instr->coord_components = 3;
1274 break;
1275 case GLSL_SAMPLER_DIM_SUBPASS:
1276 case GLSL_SAMPLER_DIM_SUBPASS_MS:
1277 unreachable("invalid sampler_dim");
1278 }
1279
1280 if (instr->is_array)
1281 instr->coord_components++;
1282
1283 assert(tgsi_inst->Src[samp].Register.File == TGSI_FILE_SAMPLER);
1284 instr->texture_index = tgsi_inst->Src[samp].Register.Index;
1285 instr->sampler_index = tgsi_inst->Src[samp].Register.Index;
1286
1287 /* TODO if we supported any opc's which take an explicit SVIEW
1288 * src, we would use that here instead. But for the "legacy"
1289 * texture opc's the SVIEW index is the same as the SAMP index:
1290 */
1291 sview = instr->texture_index;
1292
1293 if (op == nir_texop_lod) {
1294 instr->dest_type = nir_type_float;
1295 } else if (sview < c->num_samp_types) {
1296 instr->dest_type = c->samp_types[sview];
1297 } else {
1298 instr->dest_type = nir_type_float;
1299 }
1300
1301 unsigned src_number = 0;
1302
1303 instr->src[src_number].src =
1304 nir_src_for_ssa(nir_swizzle(b, src[0], SWIZ(X, Y, Z, W),
1305 instr->coord_components, false));
1306 instr->src[src_number].src_type = nir_tex_src_coord;
1307 src_number++;
1308
1309 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1310 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1311 instr->src[src_number].src_type = nir_tex_src_projector;
1312 src_number++;
1313 }
1314
1315 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB) {
1316 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1317 instr->src[src_number].src_type = nir_tex_src_bias;
1318 src_number++;
1319 }
1320
1321 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
1322 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1323 instr->src[src_number].src_type = nir_tex_src_bias;
1324 src_number++;
1325 }
1326
1327 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
1328 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1329 instr->src[src_number].src_type = nir_tex_src_lod;
1330 src_number++;
1331 }
1332
1333 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
1334 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1335 instr->src[src_number].src_type = nir_tex_src_lod;
1336 src_number++;
1337 }
1338
1339 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
1340 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1341 if (op == nir_texop_txf_ms)
1342 instr->src[src_number].src_type = nir_tex_src_ms_index;
1343 else
1344 instr->src[src_number].src_type = nir_tex_src_lod;
1345 src_number++;
1346 }
1347
1348 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
1349 instr->src[src_number].src_type = nir_tex_src_ddx;
1350 instr->src[src_number].src =
1351 nir_src_for_ssa(nir_swizzle(b, src[1], SWIZ(X, Y, Z, W),
1352 nir_tex_instr_src_size(instr, src_number),
1353 false));
1354 src_number++;
1355 instr->src[src_number].src_type = nir_tex_src_ddy;
1356 instr->src[src_number].src =
1357 nir_src_for_ssa(nir_swizzle(b, src[2], SWIZ(X, Y, Z, W),
1358 nir_tex_instr_src_size(instr, src_number),
1359 false));
1360 src_number++;
1361 }
1362
1363 if (instr->is_shadow) {
1364 if (instr->coord_components == 4)
1365 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1366 else if (instr->coord_components == 3)
1367 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1368 else
1369 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], Z));
1370
1371 instr->src[src_number].src_type = nir_tex_src_comparator;
1372 src_number++;
1373 }
1374
1375 for (i = 0; i < tgsi_inst->Texture.NumOffsets; i++) {
1376 struct tgsi_texture_offset *tex_offset = &tgsi_inst->TexOffsets[i];
1377 /* since TexOffset isn't using tgsi_full_src_register we get to
1378 * do some extra gymnastics:
1379 */
1380 nir_alu_src src;
1381
1382 memset(&src, 0, sizeof(src));
1383
1384 src.src = ttn_src_for_file_and_index(c,
1385 tex_offset->File,
1386 tex_offset->Index,
1387 NULL, NULL, NULL);
1388
1389 src.swizzle[0] = tex_offset->SwizzleX;
1390 src.swizzle[1] = tex_offset->SwizzleY;
1391 src.swizzle[2] = tex_offset->SwizzleZ;
1392 src.swizzle[3] = TGSI_SWIZZLE_W;
1393
1394 instr->src[src_number].src_type = nir_tex_src_offset;
1395 instr->src[src_number].src = nir_src_for_ssa(
1396 nir_fmov_alu(b, src, nir_tex_instr_src_size(instr, src_number)));
1397 src_number++;
1398 }
1399
1400 assert(src_number == num_srcs);
1401
1402 nir_ssa_dest_init(&instr->instr, &instr->dest,
1403 nir_tex_instr_dest_size(instr),
1404 32, NULL);
1405 nir_builder_instr_insert(b, &instr->instr);
1406
1407 /* Resolve the writemask on the texture op. */
1408 ttn_move_dest(b, dest, &instr->dest.ssa);
1409 }
1410
1411 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1412 *
1413 * dst.x = texture_width(unit, lod)
1414 * dst.y = texture_height(unit, lod)
1415 * dst.z = texture_depth(unit, lod)
1416 * dst.w = texture_levels(unit)
1417 *
1418 * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
1419 */
1420 static void
1421 ttn_txq(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1422 {
1423 nir_builder *b = &c->build;
1424 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1425 nir_tex_instr *txs, *qlv;
1426
1427 txs = nir_tex_instr_create(b->shader, 1);
1428 txs->op = nir_texop_txs;
1429 setup_texture_info(txs, tgsi_inst->Texture.Texture);
1430
1431 qlv = nir_tex_instr_create(b->shader, 0);
1432 qlv->op = nir_texop_query_levels;
1433 setup_texture_info(qlv, tgsi_inst->Texture.Texture);
1434
1435 assert(tgsi_inst->Src[1].Register.File == TGSI_FILE_SAMPLER);
1436 txs->texture_index = tgsi_inst->Src[1].Register.Index;
1437 qlv->texture_index = tgsi_inst->Src[1].Register.Index;
1438
1439 /* only a single src, the lod: */
1440 txs->src[0].src = nir_src_for_ssa(ttn_channel(b, src[0], X));
1441 txs->src[0].src_type = nir_tex_src_lod;
1442
1443 nir_ssa_dest_init(&txs->instr, &txs->dest,
1444 nir_tex_instr_dest_size(txs), 32, NULL);
1445 nir_builder_instr_insert(b, &txs->instr);
1446
1447 nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, 32, NULL);
1448 nir_builder_instr_insert(b, &qlv->instr);
1449
1450 ttn_move_dest_masked(b, dest, &txs->dest.ssa, TGSI_WRITEMASK_XYZ);
1451 ttn_move_dest_masked(b, dest, &qlv->dest.ssa, TGSI_WRITEMASK_W);
1452 }
1453
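/* A zero entry below means the opcode has no direct one-to-one nir_op
 * mapping: it is either lowered by hand in ttn_emit_instruction() (texturing,
 * control flow, LIT, EXP, ...) or not supported yet (the XXX entries).
 */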
1454 static const nir_op op_trans[TGSI_OPCODE_LAST] = {
1455 [TGSI_OPCODE_ARL] = 0,
1456 [TGSI_OPCODE_MOV] = nir_op_fmov,
1457 [TGSI_OPCODE_LIT] = 0,
1458 [TGSI_OPCODE_RCP] = nir_op_frcp,
1459 [TGSI_OPCODE_RSQ] = nir_op_frsq,
1460 [TGSI_OPCODE_EXP] = 0,
1461 [TGSI_OPCODE_LOG] = 0,
1462 [TGSI_OPCODE_MUL] = nir_op_fmul,
1463 [TGSI_OPCODE_ADD] = nir_op_fadd,
1464 [TGSI_OPCODE_DP3] = 0,
1465 [TGSI_OPCODE_DP4] = 0,
1466 [TGSI_OPCODE_DST] = 0,
1467 [TGSI_OPCODE_MIN] = nir_op_fmin,
1468 [TGSI_OPCODE_MAX] = nir_op_fmax,
1469 [TGSI_OPCODE_SLT] = nir_op_slt,
1470 [TGSI_OPCODE_SGE] = nir_op_sge,
1471 [TGSI_OPCODE_MAD] = nir_op_ffma,
1472 [TGSI_OPCODE_LRP] = 0,
1473 [TGSI_OPCODE_SQRT] = nir_op_fsqrt,
1474 [TGSI_OPCODE_FRC] = nir_op_ffract,
1475 [TGSI_OPCODE_FLR] = nir_op_ffloor,
1476 [TGSI_OPCODE_ROUND] = nir_op_fround_even,
1477 [TGSI_OPCODE_EX2] = nir_op_fexp2,
1478 [TGSI_OPCODE_LG2] = nir_op_flog2,
1479 [TGSI_OPCODE_POW] = nir_op_fpow,
1480 [TGSI_OPCODE_COS] = nir_op_fcos,
1481 [TGSI_OPCODE_DDX] = nir_op_fddx,
1482 [TGSI_OPCODE_DDY] = nir_op_fddy,
1483 [TGSI_OPCODE_KILL] = 0,
1484 [TGSI_OPCODE_PK2H] = 0, /* XXX */
1485 [TGSI_OPCODE_PK2US] = 0, /* XXX */
1486 [TGSI_OPCODE_PK4B] = 0, /* XXX */
1487 [TGSI_OPCODE_PK4UB] = 0, /* XXX */
1488 [TGSI_OPCODE_SEQ] = nir_op_seq,
1489 [TGSI_OPCODE_SGT] = 0,
1490 [TGSI_OPCODE_SIN] = nir_op_fsin,
1491 [TGSI_OPCODE_SNE] = nir_op_sne,
1492 [TGSI_OPCODE_SLE] = 0,
1493 [TGSI_OPCODE_TEX] = 0,
1494 [TGSI_OPCODE_TXD] = 0,
1495 [TGSI_OPCODE_TXP] = 0,
1496 [TGSI_OPCODE_UP2H] = 0, /* XXX */
1497 [TGSI_OPCODE_UP2US] = 0, /* XXX */
1498 [TGSI_OPCODE_UP4B] = 0, /* XXX */
1499 [TGSI_OPCODE_UP4UB] = 0, /* XXX */
1500 [TGSI_OPCODE_ARR] = 0,
1501
1502 /* No function calls, yet. */
1503 [TGSI_OPCODE_CAL] = 0, /* XXX */
1504 [TGSI_OPCODE_RET] = 0, /* XXX */
1505
1506 [TGSI_OPCODE_SSG] = nir_op_fsign,
1507 [TGSI_OPCODE_CMP] = 0,
1508 [TGSI_OPCODE_TXB] = 0,
1509 [TGSI_OPCODE_DIV] = nir_op_fdiv,
1510 [TGSI_OPCODE_DP2] = 0,
1511 [TGSI_OPCODE_TXL] = 0,
1512
1513 [TGSI_OPCODE_BRK] = 0,
1514 [TGSI_OPCODE_IF] = 0,
1515 [TGSI_OPCODE_UIF] = 0,
1516 [TGSI_OPCODE_ELSE] = 0,
1517 [TGSI_OPCODE_ENDIF] = 0,
1518
1519 [TGSI_OPCODE_DDX_FINE] = nir_op_fddx_fine,
1520 [TGSI_OPCODE_DDY_FINE] = nir_op_fddy_fine,
1521
1522 [TGSI_OPCODE_CEIL] = nir_op_fceil,
1523 [TGSI_OPCODE_I2F] = nir_op_i2f32,
1524 [TGSI_OPCODE_NOT] = nir_op_inot,
1525 [TGSI_OPCODE_TRUNC] = nir_op_ftrunc,
1526 [TGSI_OPCODE_SHL] = nir_op_ishl,
1527 [TGSI_OPCODE_AND] = nir_op_iand,
1528 [TGSI_OPCODE_OR] = nir_op_ior,
1529 [TGSI_OPCODE_MOD] = nir_op_umod,
1530 [TGSI_OPCODE_XOR] = nir_op_ixor,
1531 [TGSI_OPCODE_TXF] = 0,
1532 [TGSI_OPCODE_TXQ] = 0,
1533
1534 [TGSI_OPCODE_CONT] = 0,
1535
1536 [TGSI_OPCODE_EMIT] = 0, /* XXX */
1537 [TGSI_OPCODE_ENDPRIM] = 0, /* XXX */
1538
1539 [TGSI_OPCODE_BGNLOOP] = 0,
1540 [TGSI_OPCODE_BGNSUB] = 0, /* XXX: no function calls */
1541 [TGSI_OPCODE_ENDLOOP] = 0,
1542 [TGSI_OPCODE_ENDSUB] = 0, /* XXX: no function calls */
1543
1544 [TGSI_OPCODE_NOP] = 0,
1545 [TGSI_OPCODE_FSEQ] = nir_op_feq,
1546 [TGSI_OPCODE_FSGE] = nir_op_fge,
1547 [TGSI_OPCODE_FSLT] = nir_op_flt,
1548 [TGSI_OPCODE_FSNE] = nir_op_fne,
1549
1550 [TGSI_OPCODE_KILL_IF] = 0,
1551
1552 [TGSI_OPCODE_END] = 0,
1553
1554 [TGSI_OPCODE_F2I] = nir_op_f2i32,
1555 [TGSI_OPCODE_IDIV] = nir_op_idiv,
1556 [TGSI_OPCODE_IMAX] = nir_op_imax,
1557 [TGSI_OPCODE_IMIN] = nir_op_imin,
1558 [TGSI_OPCODE_INEG] = nir_op_ineg,
1559 [TGSI_OPCODE_ISGE] = nir_op_ige,
1560 [TGSI_OPCODE_ISHR] = nir_op_ishr,
1561 [TGSI_OPCODE_ISLT] = nir_op_ilt,
1562 [TGSI_OPCODE_F2U] = nir_op_f2u32,
1563 [TGSI_OPCODE_U2F] = nir_op_u2f32,
1564 [TGSI_OPCODE_UADD] = nir_op_iadd,
1565 [TGSI_OPCODE_UDIV] = nir_op_udiv,
1566 [TGSI_OPCODE_UMAD] = 0,
1567 [TGSI_OPCODE_UMAX] = nir_op_umax,
1568 [TGSI_OPCODE_UMIN] = nir_op_umin,
1569 [TGSI_OPCODE_UMOD] = nir_op_umod,
1570 [TGSI_OPCODE_UMUL] = nir_op_imul,
1571 [TGSI_OPCODE_USEQ] = nir_op_ieq,
1572 [TGSI_OPCODE_USGE] = nir_op_uge,
1573 [TGSI_OPCODE_USHR] = nir_op_ushr,
1574 [TGSI_OPCODE_USLT] = nir_op_ult,
1575 [TGSI_OPCODE_USNE] = nir_op_ine,
1576
1577 [TGSI_OPCODE_SWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1578 [TGSI_OPCODE_CASE] = 0, /* not emitted by glsl_to_tgsi.cpp */
1579 [TGSI_OPCODE_DEFAULT] = 0, /* not emitted by glsl_to_tgsi.cpp */
1580 [TGSI_OPCODE_ENDSWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1581
1582 /* XXX: SAMPLE opcodes */
1583
1584 [TGSI_OPCODE_UARL] = nir_op_imov,
1585 [TGSI_OPCODE_UCMP] = 0,
1586 [TGSI_OPCODE_IABS] = nir_op_iabs,
1587 [TGSI_OPCODE_ISSG] = nir_op_isign,
1588
1589 /* XXX: atomics */
1590
1591 [TGSI_OPCODE_TEX2] = 0,
1592 [TGSI_OPCODE_TXB2] = 0,
1593 [TGSI_OPCODE_TXL2] = 0,
1594
1595 [TGSI_OPCODE_IMUL_HI] = nir_op_imul_high,
1596 [TGSI_OPCODE_UMUL_HI] = nir_op_umul_high,
1597
1598 [TGSI_OPCODE_TG4] = 0,
1599 [TGSI_OPCODE_LODQ] = 0,
1600
1601 [TGSI_OPCODE_IBFE] = nir_op_ibitfield_extract,
1602 [TGSI_OPCODE_UBFE] = nir_op_ubitfield_extract,
1603 [TGSI_OPCODE_BFI] = nir_op_bitfield_insert,
1604 [TGSI_OPCODE_BREV] = nir_op_bitfield_reverse,
1605 [TGSI_OPCODE_POPC] = nir_op_bit_count,
1606 [TGSI_OPCODE_LSB] = nir_op_find_lsb,
1607 [TGSI_OPCODE_IMSB] = nir_op_ifind_msb,
1608 [TGSI_OPCODE_UMSB] = nir_op_ufind_msb,
1609
1610 [TGSI_OPCODE_INTERP_CENTROID] = 0, /* XXX */
1611 [TGSI_OPCODE_INTERP_SAMPLE] = 0, /* XXX */
1612 [TGSI_OPCODE_INTERP_OFFSET] = 0, /* XXX */
1613 };
1614
1615 static void
1616 ttn_emit_instruction(struct ttn_compile *c)
1617 {
1618 nir_builder *b = &c->build;
1619 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1620 unsigned i;
1621 unsigned tgsi_op = tgsi_inst->Instruction.Opcode;
1622 struct tgsi_full_dst_register *tgsi_dst = &tgsi_inst->Dst[0];
1623
1624 if (tgsi_op == TGSI_OPCODE_END)
1625 return;
1626
1627 nir_ssa_def *src[TGSI_FULL_MAX_SRC_REGISTERS];
1628 for (i = 0; i < tgsi_inst->Instruction.NumSrcRegs; i++) {
1629 src[i] = ttn_get_src(c, &tgsi_inst->Src[i], i);
1630 }
1631 nir_alu_dest dest = ttn_get_dest(c, tgsi_dst);
1632
1633 switch (tgsi_op) {
1634 case TGSI_OPCODE_RSQ:
1635 ttn_move_dest(b, dest, nir_frsq(b, ttn_channel(b, src[0], X)));
1636 break;
1637
1638 case TGSI_OPCODE_SQRT:
1639 ttn_move_dest(b, dest, nir_fsqrt(b, ttn_channel(b, src[0], X)));
1640 break;
1641
1642 case TGSI_OPCODE_RCP:
1643 ttn_move_dest(b, dest, nir_frcp(b, ttn_channel(b, src[0], X)));
1644 break;
1645
1646 case TGSI_OPCODE_EX2:
1647 ttn_move_dest(b, dest, nir_fexp2(b, ttn_channel(b, src[0], X)));
1648 break;
1649
1650 case TGSI_OPCODE_LG2:
1651 ttn_move_dest(b, dest, nir_flog2(b, ttn_channel(b, src[0], X)));
1652 break;
1653
1654 case TGSI_OPCODE_POW:
1655 ttn_move_dest(b, dest, nir_fpow(b,
1656 ttn_channel(b, src[0], X),
1657 ttn_channel(b, src[1], X)));
1658 break;
1659
1660 case TGSI_OPCODE_COS:
1661 ttn_move_dest(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)));
1662 break;
1663
1664 case TGSI_OPCODE_SIN:
1665 ttn_move_dest(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)));
1666 break;
1667
1668 case TGSI_OPCODE_ARL:
1669 ttn_arl(b, op_trans[tgsi_op], dest, src);
1670 break;
1671
1672 case TGSI_OPCODE_EXP:
1673 ttn_exp(b, op_trans[tgsi_op], dest, src);
1674 break;
1675
1676 case TGSI_OPCODE_LOG:
1677 ttn_log(b, op_trans[tgsi_op], dest, src);
1678 break;
1679
1680 case TGSI_OPCODE_DST:
1681 ttn_dst(b, op_trans[tgsi_op], dest, src);
1682 break;
1683
1684 case TGSI_OPCODE_LIT:
1685 ttn_lit(b, op_trans[tgsi_op], dest, src);
1686 break;
1687
1688 case TGSI_OPCODE_DP2:
1689 ttn_dp2(b, op_trans[tgsi_op], dest, src);
1690 break;
1691
1692 case TGSI_OPCODE_DP3:
1693 ttn_dp3(b, op_trans[tgsi_op], dest, src);
1694 break;
1695
1696 case TGSI_OPCODE_DP4:
1697 ttn_dp4(b, op_trans[tgsi_op], dest, src);
1698 break;
1699
1700 case TGSI_OPCODE_UMAD:
1701 ttn_umad(b, op_trans[tgsi_op], dest, src);
1702 break;
1703
1704 case TGSI_OPCODE_LRP:
1705 ttn_move_dest(b, dest, nir_flrp(b, src[2], src[1], src[0]));
1706 break;
1707
1708 case TGSI_OPCODE_KILL:
1709 ttn_kill(b, op_trans[tgsi_op], dest, src);
1710 break;
1711
1712 case TGSI_OPCODE_ARR:
1713 ttn_arr(b, op_trans[tgsi_op], dest, src);
1714 break;
1715
1716 case TGSI_OPCODE_CMP:
1717 ttn_cmp(b, op_trans[tgsi_op], dest, src);
1718 break;
1719
1720 case TGSI_OPCODE_UCMP:
1721 ttn_ucmp(b, op_trans[tgsi_op], dest, src);
1722 break;
1723
1724 case TGSI_OPCODE_SGT:
1725 ttn_sgt(b, op_trans[tgsi_op], dest, src);
1726 break;
1727
1728 case TGSI_OPCODE_SLE:
1729 ttn_sle(b, op_trans[tgsi_op], dest, src);
1730 break;
1731
1732 case TGSI_OPCODE_KILL_IF:
1733 ttn_kill_if(b, op_trans[tgsi_op], dest, src);
1734 break;
1735
1736 case TGSI_OPCODE_TEX:
1737 case TGSI_OPCODE_TXP:
1738 case TGSI_OPCODE_TXL:
1739 case TGSI_OPCODE_TXB:
1740 case TGSI_OPCODE_TXD:
1741 case TGSI_OPCODE_TEX2:
1742 case TGSI_OPCODE_TXL2:
1743 case TGSI_OPCODE_TXB2:
1744 case TGSI_OPCODE_TXF:
1745 case TGSI_OPCODE_TG4:
1746 case TGSI_OPCODE_LODQ:
1747 ttn_tex(c, dest, src);
1748 break;
1749
1750 case TGSI_OPCODE_TXQ:
1751 ttn_txq(c, dest, src);
1752 break;
1753
1754 case TGSI_OPCODE_NOP:
1755 break;
1756
1757 case TGSI_OPCODE_IF:
1758 ttn_if(c, src[0], false);
1759 break;
1760
1761 case TGSI_OPCODE_UIF:
1762 ttn_if(c, src[0], true);
1763 break;
1764
1765 case TGSI_OPCODE_ELSE:
1766 ttn_else(c);
1767 break;
1768
1769 case TGSI_OPCODE_ENDIF:
1770 ttn_endif(c);
1771 break;
1772
1773 case TGSI_OPCODE_BGNLOOP:
1774 ttn_bgnloop(c);
1775 break;
1776
1777 case TGSI_OPCODE_BRK:
1778 ttn_brk(b);
1779 break;
1780
1781 case TGSI_OPCODE_CONT:
1782 ttn_cont(b);
1783 break;
1784
1785 case TGSI_OPCODE_ENDLOOP:
1786 ttn_endloop(c);
1787 break;
1788
1789 default:
1790 if (op_trans[tgsi_op] != 0 || tgsi_op == TGSI_OPCODE_MOV) {
1791 ttn_alu(b, op_trans[tgsi_op], dest, src);
1792 } else {
1793 fprintf(stderr, "unknown TGSI opcode: %s\n",
1794 tgsi_get_opcode_name(tgsi_op));
1795 abort();
1796 }
1797 break;
1798 }
1799
1800 if (tgsi_inst->Instruction.Saturate) {
1801 assert(!dest.dest.is_ssa);
1802 ttn_move_dest(b, dest, nir_fsat(b, ttn_src_for_dest(b, &dest)));
1803 }
1804
1805 /* if the dst has a matching var, append store_var to move
1806 * output from reg to var
1807 */
1808 nir_variable *var = ttn_get_var(c, tgsi_dst);
1809 if (var) {
1810 unsigned index = tgsi_dst->Register.Index;
1811 unsigned offset = c->temp_regs[index].offset;
1812 struct tgsi_ind_register *indirect = tgsi_dst->Register.Indirect ?
1813 &tgsi_dst->Indirect : NULL;
1814 nir_src val = nir_src_for_reg(dest.dest.reg.reg);
1815 nir_store_deref(b, ttn_array_deref(c, var, offset, indirect),
1816 nir_ssa_for_src(b, val, 4), dest.write_mask);
1817 }
1818 }
1819
1820 /**
1821 * Emits a NIR store intrinsic for each TGSI_FILE_OUTPUT value to the output
1822 * variables at the end of the shader.
1823 *
1824 * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
1825 * written, because there's no output load intrinsic, which means we couldn't
1826 * handle writemasks.
1827 */
1828 static void
1829 ttn_add_output_stores(struct ttn_compile *c)
1830 {
1831 nir_builder *b = &c->build;
1832
1833 foreach_list_typed(nir_variable, var, node, &b->shader->outputs) {
1834 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1835 unsigned i;
1836
1837 for (i = 0; i < array_len; i++) {
1838 nir_intrinsic_instr *store =
1839 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_output);
1840 unsigned loc = var->data.driver_location + i;
1841
1842 nir_src src = nir_src_for_reg(c->output_regs[loc].reg);
1843 src.reg.base_offset = c->output_regs[loc].offset;
1844
1845 if (c->build.shader->info.stage == MESA_SHADER_FRAGMENT &&
1846 var->data.location == FRAG_RESULT_DEPTH) {
1847 /* TGSI uses TGSI_SEMANTIC_POSITION.z for the depth output, while
1848 * NIR uses a single float FRAG_RESULT_DEPTH.
1849 */
1850 src = nir_src_for_ssa(nir_channel(b, nir_ssa_for_src(b, src, 4), 2));
1851 store->num_components = 1;
1852 } else {
1853 store->num_components = 4;
1854 }
1855 store->src[0] = src;
1856
1857 nir_intrinsic_set_base(store, loc);
1858 nir_intrinsic_set_write_mask(store, 0xf);
1859 store->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
1860 nir_builder_instr_insert(b, &store->instr);
1861 }
1862 }
1863 }
1864
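/* Entry point: scan the TGSI tokens, then walk them once, turning
 * declarations, immediates and instructions into NIR via the ttn_emit_*
 * helpers above, and finally emit the deferred output stores.
 */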
1865 struct nir_shader *
1866 tgsi_to_nir(const void *tgsi_tokens,
1867 const nir_shader_compiler_options *options)
1868 {
1869 struct tgsi_parse_context parser;
1870 struct tgsi_shader_info scan;
1871 struct ttn_compile *c;
1872 struct nir_shader *s;
1873 int ret;
1874
1875 c = rzalloc(NULL, struct ttn_compile);
1876
1877 tgsi_scan_shader(tgsi_tokens, &scan);
1878 c->scan = &scan;
1879
1880 nir_builder_init_simple_shader(&c->build, NULL,
1881 tgsi_processor_to_shader_stage(scan.processor),
1882 options);
1883 s = c->build.shader;
1884
1885 s->num_inputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1886 s->num_uniforms = scan.const_file_max[0] + 1;
1887 s->num_outputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1888
1889 c->output_regs = rzalloc_array(c, struct ttn_reg_info,
1890 scan.file_max[TGSI_FILE_OUTPUT] + 1);
1891 c->temp_regs = rzalloc_array(c, struct ttn_reg_info,
1892 scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1893 c->imm_defs = rzalloc_array(c, nir_ssa_def *,
1894 scan.file_max[TGSI_FILE_IMMEDIATE] + 1);
1895
1896 c->num_samp_types = scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
1897 c->samp_types = rzalloc_array(c, nir_alu_type, c->num_samp_types);
1898
1899 c->if_stack = rzalloc_array(c, nir_cursor,
1900 (scan.opcode_count[TGSI_OPCODE_IF] +
1901 scan.opcode_count[TGSI_OPCODE_UIF]) * 2);
1902 c->loop_stack = rzalloc_array(c, nir_cursor,
1903 scan.opcode_count[TGSI_OPCODE_BGNLOOP]);
1904
1905 ret = tgsi_parse_init(&parser, tgsi_tokens);
1906 assert(ret == TGSI_PARSE_OK);
1907
1908 while (!tgsi_parse_end_of_tokens(&parser)) {
1909 tgsi_parse_token(&parser);
1910 c->token = &parser.FullToken;
1911
1912 switch (parser.FullToken.Token.Type) {
1913 case TGSI_TOKEN_TYPE_DECLARATION:
1914 ttn_emit_declaration(c);
1915 break;
1916
1917 case TGSI_TOKEN_TYPE_INSTRUCTION:
1918 ttn_emit_instruction(c);
1919 break;
1920
1921 case TGSI_TOKEN_TYPE_IMMEDIATE:
1922 ttn_emit_immediate(c);
1923 break;
1924 }
1925 }
1926
1927 tgsi_parse_free(&parser);
1928
1929 ttn_add_output_stores(c);
1930
1931 ralloc_free(c);
1932 return s;
1933 }