1 /*
2 * Copyright © 2014-2015 Broadcom
3 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "util/ralloc.h"
26 #include "glsl/nir/nir.h"
27 #include "glsl/nir/nir_builder.h"
28 #include "glsl/list.h"
29 #include "glsl/shader_enums.h"
30
31 #include "nir/tgsi_to_nir.h"
32 #include "tgsi/tgsi_parse.h"
33 #include "tgsi/tgsi_dump.h"
34 #include "tgsi/tgsi_info.h"
35 #include "tgsi/tgsi_scan.h"
36
37 #define SWIZ(X, Y, Z, W) (unsigned[4]){ \
38 TGSI_SWIZZLE_##X, \
39 TGSI_SWIZZLE_##Y, \
40 TGSI_SWIZZLE_##Z, \
41 TGSI_SWIZZLE_##W, \
42 }
43
44 struct ttn_reg_info {
45 /** nir register containing this TGSI index. */
46 nir_register *reg;
47 nir_variable *var;
48 /** Offset (in vec4s) from the start of var for this TGSI index. */
49 int offset;
50 };
51
52 struct ttn_compile {
53 union tgsi_full_token *token;
54 nir_builder build;
55 struct tgsi_shader_info *scan;
56
57 struct ttn_reg_info *output_regs;
58 struct ttn_reg_info *temp_regs;
59 nir_ssa_def **imm_defs;
60
61 unsigned num_samp_types;
62 nir_alu_type *samp_types;
63
64 nir_register *addr_reg;
65
66 /**
67 * Stack of cf_node_lists where instructions should be pushed as we pop
68 * back out of the control flow stack.
69 *
70 * For each IF/ELSE/ENDIF block, if_stack[if_stack_pos] has where the else
71 * instructions should be placed, and if_stack[if_stack_pos - 1] has where
72 * the next instructions outside of the if/then/else block go.
73 */
74 struct exec_list **if_stack;
75 unsigned if_stack_pos;
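/*
 * Illustrative sketch (not from the original comments): for one TGSI
 * IF/ELSE/ENDIF sequence, with "outer" being the cf_node_list that was
 * current when the IF was reached, the handlers below do roughly:
 *
 *   IF:    if_stack[if_stack_pos++] = outer;
 *          if_stack[if_stack_pos++] = &if_stmt->else_list;
 *          (builder emits into if_stmt->then_list)
 *   ELSE:  builder switches to if_stack[if_stack_pos - 1], the else list
 *   ENDIF: if_stack_pos -= 2;
 *          builder returns to if_stack[if_stack_pos], i.e. back to outer
 */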
76
77 /**
78 * Stack of cf_node_lists where instructions should be pushed as we pop
79 * back out of the control flow stack.
80 *
81 * loop_stack[loop_stack_pos - 1] contains the cf_node_list for the outside
82 * of the loop.
83 */
84 struct exec_list **loop_stack;
85 unsigned loop_stack_pos;
86
87 /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
88 unsigned next_imm;
89 };
90
91 #define ttn_swizzle(b, src, x, y, z, w) \
92 nir_swizzle(b, src, SWIZ(x, y, z, w), 4, false)
93 #define ttn_channel(b, src, swiz) \
94 nir_swizzle(b, src, SWIZ(swiz, swiz, swiz, swiz), 1, false)
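/*
 * Usage note (illustrative): ttn_swizzle(b, src, W, Z, Y, X) emits an fmov of
 * src with reversed component order, while ttn_channel(b, src, X) replicates
 * a single component into a one-component value, which is how the scalar
 * TGSI opcodes below pick out their .x argument.
 */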
95
96 static nir_ssa_def *
97 ttn_src_for_dest(nir_builder *b, nir_alu_dest *dest)
98 {
99 nir_alu_src src;
100 memset(&src, 0, sizeof(src));
101
102 if (dest->dest.is_ssa)
103 src.src = nir_src_for_ssa(&dest->dest.ssa);
104 else {
105 assert(!dest->dest.reg.indirect);
106 src.src = nir_src_for_reg(dest->dest.reg.reg);
107 src.src.reg.base_offset = dest->dest.reg.base_offset;
108 }
109
110 for (int i = 0; i < 4; i++)
111 src.swizzle[i] = i;
112
113 return nir_fmov_alu(b, src, 4);
114 }
115
116 static void
117 ttn_emit_declaration(struct ttn_compile *c)
118 {
119 nir_builder *b = &c->build;
120 struct tgsi_full_declaration *decl = &c->token->FullDeclaration;
121 unsigned array_size = decl->Range.Last - decl->Range.First + 1;
122 unsigned file = decl->Declaration.File;
123 unsigned i;
124
125 if (file == TGSI_FILE_TEMPORARY) {
126 if (decl->Declaration.Array) {
127 /* for arrays, we create variables instead of registers: */
128 nir_variable *var = rzalloc(b->shader, nir_variable);
129
130 var->type = glsl_array_type(glsl_vec4_type(), array_size);
131 var->data.mode = nir_var_global;
132 var->name = ralloc_asprintf(var, "arr_%d", decl->Array.ArrayID);
133
134 exec_list_push_tail(&b->shader->globals, &var->node);
135
136 for (i = 0; i < array_size; i++) {
137 /* point all the matching slots to the same var,
138 * with appropriate offset set, mostly just so
139 * we know what to do when tgsi does a non-indirect
140 * access
141 */
142 c->temp_regs[decl->Range.First + i].reg = NULL;
143 c->temp_regs[decl->Range.First + i].var = var;
144 c->temp_regs[decl->Range.First + i].offset = i;
145 }
146 } else {
147 for (i = 0; i < array_size; i++) {
148 nir_register *reg = nir_local_reg_create(b->impl);
149 reg->num_components = 4;
150 c->temp_regs[decl->Range.First + i].reg = reg;
151 c->temp_regs[decl->Range.First + i].var = NULL;
152 c->temp_regs[decl->Range.First + i].offset = 0;
153 }
154 }
155 } else if (file == TGSI_FILE_ADDRESS) {
156 c->addr_reg = nir_local_reg_create(b->impl);
157 c->addr_reg->num_components = 4;
158 } else if (file == TGSI_FILE_SYSTEM_VALUE) {
159 /* Nothing to record for system values. */
160 } else if (file == TGSI_FILE_SAMPLER) {
161 /* Nothing to record for samplers. */
162 } else if (file == TGSI_FILE_SAMPLER_VIEW) {
163 struct tgsi_declaration_sampler_view *sview = &decl->SamplerView;
164 nir_alu_type type;
165
166 assert((sview->ReturnTypeX == sview->ReturnTypeY) &&
167 (sview->ReturnTypeX == sview->ReturnTypeZ) &&
168 (sview->ReturnTypeX == sview->ReturnTypeW));
169
170 switch (sview->ReturnTypeX) {
171 case TGSI_RETURN_TYPE_SINT:
172 type = nir_type_int;
173 break;
174 case TGSI_RETURN_TYPE_UINT:
175 type = nir_type_unsigned;
176 break;
177 case TGSI_RETURN_TYPE_FLOAT:
178 default:
179 type = nir_type_float;
180 break;
181 }
182
183 for (i = 0; i < array_size; i++) {
184 c->samp_types[decl->Range.First + i] = type;
185 }
186 } else {
187 bool is_array = (array_size > 1);
188
189 assert(file == TGSI_FILE_INPUT ||
190 file == TGSI_FILE_OUTPUT ||
191 file == TGSI_FILE_CONSTANT);
192
193 /* nothing to do for UBOs: */
194 if ((file == TGSI_FILE_CONSTANT) && decl->Declaration.Dimension)
195 return;
196
197 if ((file == TGSI_FILE_INPUT) || (file == TGSI_FILE_OUTPUT)) {
198 is_array = (is_array && decl->Declaration.Array &&
199 (decl->Array.ArrayID != 0));
200 }
201
202 for (i = 0; i < array_size; i++) {
203 unsigned idx = decl->Range.First + i;
204 nir_variable *var = rzalloc(b->shader, nir_variable);
205
206 var->data.driver_location = idx;
207
208 var->type = glsl_vec4_type();
209 if (is_array)
210 var->type = glsl_array_type(var->type, array_size);
211
212 switch (file) {
213 case TGSI_FILE_INPUT:
214 var->data.read_only = true;
215 var->data.mode = nir_var_shader_in;
216 var->name = ralloc_asprintf(var, "in_%d", idx);
217
218 /* We should probably translate to a VERT_ATTRIB_* or VARYING_SLOT_*
219 * instead, but nothing in NIR core is looking at the value
220 * currently, and this means fewer changes to drivers.
221 */
222 var->data.location = decl->Semantic.Name;
223 var->data.index = decl->Semantic.Index;
224
225 /* We definitely need to translate the interpolation field, because
226 * nir_print will decode it.
227 */
228 switch (decl->Interp.Interpolate) {
229 case TGSI_INTERPOLATE_CONSTANT:
230 var->data.interpolation = INTERP_QUALIFIER_FLAT;
231 break;
232 case TGSI_INTERPOLATE_LINEAR:
233 var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
234 break;
235 case TGSI_INTERPOLATE_PERSPECTIVE:
236 var->data.interpolation = INTERP_QUALIFIER_SMOOTH;
237 break;
238 }
239
240 exec_list_push_tail(&b->shader->inputs, &var->node);
241 break;
242 case TGSI_FILE_OUTPUT: {
243 /* Since we can't load from outputs in the IR, we make temporaries
244 * for the outputs and emit stores to the real outputs at the end of
245 * the shader.
246 */
247 nir_register *reg = nir_local_reg_create(b->impl);
248 reg->num_components = 4;
249 if (is_array)
250 reg->num_array_elems = array_size;
251
252 var->data.mode = nir_var_shader_out;
253 var->name = ralloc_asprintf(var, "out_%d", idx);
254
255 var->data.location = decl->Semantic.Name;
256 if (decl->Semantic.Name == TGSI_SEMANTIC_COLOR &&
257 decl->Semantic.Index == 0 &&
258 c->scan->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
259 var->data.index = -1;
260 else
261 var->data.index = decl->Semantic.Index;
262
263 if (is_array) {
264 unsigned j;
265 for (j = 0; j < array_size; j++) {
266 c->output_regs[idx + j].offset = i + j;
267 c->output_regs[idx + j].reg = reg;
268 }
269 } else {
270 c->output_regs[idx].offset = i;
271 c->output_regs[idx].reg = reg;
272 }
273
274 exec_list_push_tail(&b->shader->outputs, &var->node);
275 }
276 break;
277 case TGSI_FILE_CONSTANT:
278 var->data.mode = nir_var_uniform;
279 var->name = ralloc_asprintf(var, "uniform_%d", idx);
280
281 exec_list_push_tail(&b->shader->uniforms, &var->node);
282 break;
283 default:
284 unreachable("bad declaration file");
285 return;
286 }
287
288 if (is_array)
289 break;
290 }
291
292 }
293 }
294
295 static void
296 ttn_emit_immediate(struct ttn_compile *c)
297 {
298 nir_builder *b = &c->build;
299 struct tgsi_full_immediate *tgsi_imm = &c->token->FullImmediate;
300 nir_load_const_instr *load_const;
301 int i;
302
303 load_const = nir_load_const_instr_create(b->shader, 4);
304 c->imm_defs[c->next_imm] = &load_const->def;
305 c->next_imm++;
306
307 for (i = 0; i < 4; i++)
308 load_const->value.u[i] = tgsi_imm->u[i].Uint;
309
310 nir_instr_insert_after_cf_list(b->cf_node_list, &load_const->instr);
311 }
312
313 static nir_src
314 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect);
315
316 /* generate either a constant or indirect deref chain for accessing an
317 * array variable.
318 */
319 static nir_deref_var *
320 ttn_array_deref(struct ttn_compile *c, nir_intrinsic_instr *instr,
321 nir_variable *var, unsigned offset,
322 struct tgsi_ind_register *indirect)
323 {
324 nir_deref_var *deref = nir_deref_var_create(instr, var);
325 nir_deref_array *arr = nir_deref_array_create(deref);
326
327 arr->base_offset = offset;
328 arr->deref.type = glsl_get_array_element(var->type);
329
330 if (indirect) {
331 arr->deref_array_type = nir_deref_array_type_indirect;
332 arr->indirect = ttn_src_for_indirect(c, indirect);
333 } else {
334 arr->deref_array_type = nir_deref_array_type_direct;
335 }
336
337 deref->deref.child = &arr->deref;
338
339 return deref;
340 }
341
342 static nir_src
343 ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
344 struct tgsi_ind_register *indirect,
345 struct tgsi_dimension *dim,
346 struct tgsi_ind_register *dimind)
347 {
348 nir_builder *b = &c->build;
349 nir_src src;
350
351 memset(&src, 0, sizeof(src));
352
353 switch (file) {
354 case TGSI_FILE_TEMPORARY:
355 if (c->temp_regs[index].var) {
356 unsigned offset = c->temp_regs[index].offset;
357 nir_variable *var = c->temp_regs[index].var;
358 nir_intrinsic_instr *load;
359
360 load = nir_intrinsic_instr_create(b->shader,
361 nir_intrinsic_load_var);
362 load->num_components = 4;
363 load->variables[0] = ttn_array_deref(c, load, var, offset, indirect);
364
365 nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
366 nir_instr_insert_after_cf_list(b->cf_node_list, &load->instr);
367
368 src = nir_src_for_ssa(&load->dest.ssa);
369
370 } else {
371 assert(!indirect);
372 src.reg.reg = c->temp_regs[index].reg;
373 }
374 assert(!dim);
375 break;
376
377 case TGSI_FILE_ADDRESS:
378 src.reg.reg = c->addr_reg;
379 assert(!dim);
380 break;
381
382 case TGSI_FILE_IMMEDIATE:
383 src = nir_src_for_ssa(c->imm_defs[index]);
384 assert(!indirect);
385 assert(!dim);
386 break;
387
388 case TGSI_FILE_SYSTEM_VALUE: {
389 nir_intrinsic_instr *load;
390 nir_intrinsic_op op;
391 unsigned ncomp = 1;
392
393 assert(!indirect);
394 assert(!dim);
395
396 switch (c->scan->system_value_semantic_name[index]) {
397 case TGSI_SEMANTIC_VERTEXID_NOBASE:
398 op = nir_intrinsic_load_vertex_id_zero_base;
399 break;
400 case TGSI_SEMANTIC_VERTEXID:
401 op = nir_intrinsic_load_vertex_id;
402 break;
403 case TGSI_SEMANTIC_BASEVERTEX:
404 op = nir_intrinsic_load_base_vertex;
405 break;
406 case TGSI_SEMANTIC_INSTANCEID:
407 op = nir_intrinsic_load_instance_id;
408 break;
409 default:
410 unreachable("bad system value");
411 }
412
413 load = nir_intrinsic_instr_create(b->shader, op);
414 load->num_components = ncomp;
415
416 nir_ssa_dest_init(&load->instr, &load->dest, ncomp, NULL);
417 nir_instr_insert_after_cf_list(b->cf_node_list, &load->instr);
418
419 src = nir_src_for_ssa(&load->dest.ssa);
420 break;
421 }
422
423 case TGSI_FILE_INPUT:
424 case TGSI_FILE_CONSTANT: {
425 nir_intrinsic_instr *load;
426 nir_intrinsic_op op;
427 unsigned srcn = 0;
428
429 switch (file) {
430 case TGSI_FILE_INPUT:
431 op = indirect ? nir_intrinsic_load_input_indirect :
432 nir_intrinsic_load_input;
433 assert(!dim);
434 break;
435 case TGSI_FILE_CONSTANT:
436 if (dim) {
437 op = indirect ? nir_intrinsic_load_ubo_indirect :
438 nir_intrinsic_load_ubo;
439 /* convert index from vec4 to byte: */
440 index *= 16;
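/* e.g. a constant at vec4 index 3 becomes byte offset 3 * 16 = 48 */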
441 } else {
442 op = indirect ? nir_intrinsic_load_uniform_indirect :
443 nir_intrinsic_load_uniform;
444 }
445 break;
446 default:
447 unreachable("No other load files supported");
448 break;
449 }
450
451 load = nir_intrinsic_instr_create(b->shader, op);
452
453 load->num_components = 4;
454 load->const_index[0] = index;
455 if (dim) {
456 if (dimind) {
457 load->src[srcn] =
458 ttn_src_for_file_and_index(c, dimind->File, dimind->Index,
459 NULL, NULL, NULL);
460 } else {
461 /* UBOs start at index 1 in TGSI: */
462 load->src[srcn] =
463 nir_src_for_ssa(nir_imm_int(b, dim->Index - 1));
464 }
465 srcn++;
466 }
467 if (indirect) {
468 load->src[srcn] = ttn_src_for_indirect(c, indirect);
469 if (dim) {
470 assert(load->src[srcn].is_ssa);
471 /* we also need to covert vec4 to byte here too: */
472 load->src[srcn] =
473 nir_src_for_ssa(nir_ishl(b, load->src[srcn].ssa,
474 nir_imm_int(b, 4)));
475 }
476 srcn++;
477 }
478 nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
479 nir_instr_insert_after_cf_list(b->cf_node_list, &load->instr);
480
481 src = nir_src_for_ssa(&load->dest.ssa);
482 break;
483 }
484
485 default:
486 unreachable("bad src file");
487 }
488
489
490 return src;
491 }
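/*
 * Example (assumed TGSI syntax, for illustration): a source operand written
 * as CONST[ADDR[0].x + 3] reaches this function with index == 3 and a
 * non-NULL indirect, and becomes a load_uniform_indirect intrinsic whose
 * const_index[0] is 3 and whose extra source is the address value built by
 * ttn_src_for_indirect() below.
 */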
492
493 static nir_src
494 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect)
495 {
496 nir_builder *b = &c->build;
497 nir_alu_src src;
498 memset(&src, 0, sizeof(src));
499 for (int i = 0; i < 4; i++)
500 src.swizzle[i] = indirect->Swizzle;
501 src.src = ttn_src_for_file_and_index(c,
502 indirect->File,
503 indirect->Index,
504 NULL, NULL, NULL);
505 return nir_src_for_ssa(nir_imov_alu(b, src, 1));
506 }
507
508 static nir_alu_dest
509 ttn_get_dest(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
510 {
511 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
512 nir_alu_dest dest;
513 unsigned index = tgsi_dst->Index;
514
515 memset(&dest, 0, sizeof(dest));
516
517 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
518 if (c->temp_regs[index].var) {
519 nir_builder *b = &c->build;
520 nir_intrinsic_instr *load;
521 struct tgsi_ind_register *indirect =
522 tgsi_dst->Indirect ? &tgsi_fdst->Indirect : NULL;
523 nir_register *reg;
524
525 /* this works, because TGSI will give us a base offset
526 * (in case of indirect index) that points back into
527 * the array. Access can be direct or indirect; we
528 * don't really care. Just create a one-shot dst reg
529 * that will get store_var'd back into the array var
530 * at the end of ttn_emit_instruction()
531 */
532 reg = nir_local_reg_create(c->build.impl);
533 reg->num_components = 4;
534 dest.dest.reg.reg = reg;
535 dest.dest.reg.base_offset = 0;
536
537 /* since the alu op might not write to all components
538 * of the temporary, we must first do a load_var to
539 * get the previous array elements into the register.
540 * This is one area that NIR could use a bit of
541 * improvement (or opt pass to clean up the mess
542 * once things are scalarized)
543 */
544
545 load = nir_intrinsic_instr_create(c->build.shader,
546 nir_intrinsic_load_var);
547 load->num_components = 4;
548 load->variables[0] =
549 ttn_array_deref(c, load, c->temp_regs[index].var,
550 c->temp_regs[index].offset,
551 indirect);
552
553 load->dest = nir_dest_for_reg(reg);
554
555 nir_instr_insert_after_cf_list(b->cf_node_list, &load->instr);
556 } else {
557 assert(!tgsi_dst->Indirect);
558 dest.dest.reg.reg = c->temp_regs[index].reg;
559 dest.dest.reg.base_offset = c->temp_regs[index].offset;
560 }
561 } else if (tgsi_dst->File == TGSI_FILE_OUTPUT) {
562 dest.dest.reg.reg = c->output_regs[index].reg;
563 dest.dest.reg.base_offset = c->output_regs[index].offset;
564 } else if (tgsi_dst->File == TGSI_FILE_ADDRESS) {
565 assert(index == 0);
566 dest.dest.reg.reg = c->addr_reg;
567 }
568
569 dest.write_mask = tgsi_dst->WriteMask;
570 dest.saturate = false;
571
572 if (tgsi_dst->Indirect && (tgsi_dst->File != TGSI_FILE_TEMPORARY)) {
573 nir_src *indirect = ralloc(c->build.shader, nir_src);
574 *indirect = ttn_src_for_indirect(c, &tgsi_fdst->Indirect);
575 dest.dest.reg.indirect = indirect;
576 }
577
578 return dest;
579 }
580
581 static nir_variable *
582 ttn_get_var(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
583 {
584 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
585 unsigned index = tgsi_dst->Index;
586
587 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
588 /* we should not have an indirect when there is no var! */
589 if (!c->temp_regs[index].var)
590 assert(!tgsi_dst->Indirect);
591 return c->temp_regs[index].var;
592 }
593
594 return NULL;
595 }
596
597 static nir_ssa_def *
598 ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc)
599 {
600 nir_builder *b = &c->build;
601 struct tgsi_src_register *tgsi_src = &tgsi_fsrc->Register;
602 unsigned tgsi_opcode = c->token->FullInstruction.Instruction.Opcode;
603 unsigned tgsi_src_type = tgsi_opcode_infer_src_type(tgsi_opcode);
604 bool src_is_float = !(tgsi_src_type == TGSI_TYPE_SIGNED ||
605 tgsi_src_type == TGSI_TYPE_UNSIGNED);
606 nir_alu_src src;
607
608 memset(&src, 0, sizeof(src));
609
610 if (tgsi_src->File == TGSI_FILE_NULL) {
611 return nir_imm_float(b, 0.0);
612 } else if (tgsi_src->File == TGSI_FILE_SAMPLER) {
613 /* Only the sampler index gets used in texturing, and the texturing code
614 * looks that up on its own instead of using a nir_alu_src.
615 */
616 assert(!tgsi_src->Indirect);
617 return NULL;
618 } else {
619 struct tgsi_ind_register *ind = NULL;
620 struct tgsi_dimension *dim = NULL;
621 struct tgsi_ind_register *dimind = NULL;
622 if (tgsi_src->Indirect)
623 ind = &tgsi_fsrc->Indirect;
624 if (tgsi_src->Dimension) {
625 dim = &tgsi_fsrc->Dimension;
626 if (dim->Indirect)
627 dimind = &tgsi_fsrc->DimIndirect;
628 }
629 src.src = ttn_src_for_file_and_index(c,
630 tgsi_src->File,
631 tgsi_src->Index,
632 ind, dim, dimind);
633 }
634
635 src.swizzle[0] = tgsi_src->SwizzleX;
636 src.swizzle[1] = tgsi_src->SwizzleY;
637 src.swizzle[2] = tgsi_src->SwizzleZ;
638 src.swizzle[3] = tgsi_src->SwizzleW;
639
640 nir_ssa_def *def = nir_fmov_alu(b, src, 4);
641
642 if (tgsi_src->Absolute) {
643 if (src_is_float)
644 def = nir_fabs(b, def);
645 else
646 def = nir_iabs(b, def);
647 }
648
649 if (tgsi_src->Negate) {
650 if (src_is_float)
651 def = nir_fneg(b, def);
652 else
653 def = nir_ineg(b, def);
654 }
655
656 return def;
657 }
658
659 static void
660 ttn_alu(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
661 {
662 unsigned num_srcs = nir_op_infos[op].num_inputs;
663 nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
664 unsigned i;
665
666 for (i = 0; i < num_srcs; i++)
667 instr->src[i].src = nir_src_for_ssa(src[i]);
668
669 instr->dest = dest;
670 nir_instr_insert_after_cf_list(b->cf_node_list, &instr->instr);
671 }
672
673 static void
674 ttn_move_dest_masked(nir_builder *b, nir_alu_dest dest,
675 nir_ssa_def *def, unsigned write_mask)
676 {
677 if (!(dest.write_mask & write_mask))
678 return;
679
680 nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_imov);
681 mov->dest = dest;
682 mov->dest.write_mask &= write_mask;
683 mov->src[0].src = nir_src_for_ssa(def);
684 for (unsigned i = def->num_components; i < 4; i++)
685 mov->src[0].swizzle[i] = def->num_components - 1;
686 nir_instr_insert_after_cf_list(b->cf_node_list, &mov->instr);
687 }
688
689 static void
690 ttn_move_dest(nir_builder *b, nir_alu_dest dest, nir_ssa_def *def)
691 {
692 ttn_move_dest_masked(b, dest, def, TGSI_WRITEMASK_XYZW);
693 }
694
695 static void
696 ttn_arl(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
697 {
698 ttn_move_dest(b, dest, nir_f2i(b, nir_ffloor(b, src[0])));
699 }
700
701 /* EXP - Approximate Exponential Base 2
702 * dst.x = 2^{\lfloor src.x\rfloor}
703 * dst.y = src.x - \lfloor src.x\rfloor
704 * dst.z = 2^{src.x}
705 * dst.w = 1.0
706 */
707 static void
708 ttn_exp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
709 {
710 nir_ssa_def *srcx = ttn_channel(b, src[0], X);
711
712 ttn_move_dest_masked(b, dest, nir_fexp2(b, nir_ffloor(b, srcx)),
713 TGSI_WRITEMASK_X);
714 ttn_move_dest_masked(b, dest, nir_fsub(b, srcx, nir_ffloor(b, srcx)),
715 TGSI_WRITEMASK_Y);
716 ttn_move_dest_masked(b, dest, nir_fexp2(b, srcx), TGSI_WRITEMASK_Z);
717 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
718 }
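/*
 * Worked example (illustrative numbers): for src.x = 2.5 the code above
 * yields dst = (4.0, 0.5, ~5.657, 1.0), i.e. 2^floor(2.5), the fractional
 * part of 2.5, 2^2.5, and the constant 1.0.
 */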
719
720 /* LOG - Approximate Logarithm Base 2
721 * dst.x = \lfloor\log_2{|src.x|}\rfloor
722 * dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
723 * dst.z = \log_2{|src.x|}
724 * dst.w = 1.0
725 */
726 static void
727 ttn_log(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
728 {
729 nir_ssa_def *abs_srcx = nir_fabs(b, ttn_channel(b, src[0], X));
730 nir_ssa_def *log2 = nir_flog2(b, abs_srcx);
731
732 ttn_move_dest_masked(b, dest, nir_ffloor(b, log2), TGSI_WRITEMASK_X);
733 ttn_move_dest_masked(b, dest,
734 nir_fdiv(b, abs_srcx, nir_fexp2(b, nir_ffloor(b, log2))),
735 TGSI_WRITEMASK_Y);
736 ttn_move_dest_masked(b, dest, nir_flog2(b, abs_srcx), TGSI_WRITEMASK_Z);
737 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
738 }
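/*
 * Worked example (illustrative numbers): for |src.x| = 10.0, log2(10.0) is
 * about 3.3219, so dst = (3.0, 10.0/8.0 = 1.25, ~3.3219, 1.0).
 */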
739
740 /* DST - Distance Vector
741 * dst.x = 1.0
742 * dst.y = src0.y \times src1.y
743 * dst.z = src0.z
744 * dst.w = src1.w
745 */
746 static void
747 ttn_dst(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
748 {
749 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_X);
750 ttn_move_dest_masked(b, dest, nir_fmul(b, src[0], src[1]), TGSI_WRITEMASK_Y);
751 ttn_move_dest_masked(b, dest, nir_fmov(b, src[0]), TGSI_WRITEMASK_Z);
752 ttn_move_dest_masked(b, dest, nir_fmov(b, src[1]), TGSI_WRITEMASK_W);
753 }
754
755 /* LIT - Light Coefficients
756 * dst.x = 1.0
757 * dst.y = max(src.x, 0.0)
758 * dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0)} : 0
759 * dst.w = 1.0
760 */
761 static void
762 ttn_lit(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
763 {
764 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_XW);
765
766 ttn_move_dest_masked(b, dest, nir_fmax(b, ttn_channel(b, src[0], X),
767 nir_imm_float(b, 0.0)), TGSI_WRITEMASK_Y);
768
769 if (dest.write_mask & TGSI_WRITEMASK_Z) {
770 nir_ssa_def *src0_y = ttn_channel(b, src[0], Y);
771 nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, ttn_channel(b, src[0], W),
772 nir_imm_float(b, 128.0)),
773 nir_imm_float(b, -128.0));
774 nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
775 wclamp);
776
777 ttn_move_dest_masked(b, dest,
778 nir_bcsel(b,
779 nir_fge(b,
780 nir_imm_float(b, 0.0),
781 ttn_channel(b, src[0], X)),
782 nir_imm_float(b, 0.0),
783 pow),
784 TGSI_WRITEMASK_Z);
785 }
786 }
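/*
 * Worked example (illustrative numbers): for src = (0.5, 0.25, 0.0, 2.0) the
 * code above yields dst = (1.0, 0.5, 0.0625, 1.0), since src.x > 0 and
 * max(0.25, 0.0)^clamp(2.0, -128.0, 128.0) = 0.0625.
 */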
787
788 /* SCS - Sine Cosine
789 * dst.x = \cos{src.x}
790 * dst.y = \sin{src.x}
791 * dst.z = 0.0
792 * dst.w = 1.0
793 */
794 static void
795 ttn_scs(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
796 {
797 ttn_move_dest_masked(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)),
798 TGSI_WRITEMASK_X);
799 ttn_move_dest_masked(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)),
800 TGSI_WRITEMASK_Y);
801 ttn_move_dest_masked(b, dest, nir_imm_float(b, 0.0), TGSI_WRITEMASK_Z);
802 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
803 }
804
805 static void
806 ttn_sle(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
807 {
808 ttn_move_dest(b, dest, nir_sge(b, src[1], src[0]));
809 }
810
811 static void
812 ttn_sgt(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
813 {
814 ttn_move_dest(b, dest, nir_slt(b, src[1], src[0]));
815 }
816
817 static void
818 ttn_clamp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
819 {
820 ttn_move_dest(b, dest, nir_fmin(b, nir_fmax(b, src[0], src[1]), src[2]));
821 }
822
823 static void
824 ttn_xpd(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
825 {
826 ttn_move_dest_masked(b, dest,
827 nir_fsub(b,
828 nir_fmul(b,
829 ttn_swizzle(b, src[0], Y, Z, X, X),
830 ttn_swizzle(b, src[1], Z, X, Y, X)),
831 nir_fmul(b,
832 ttn_swizzle(b, src[1], Y, Z, X, X),
833 ttn_swizzle(b, src[0], Z, X, Y, X))),
834 TGSI_WRITEMASK_XYZ);
835 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
836 }
837
838 static void
839 ttn_dp2a(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
840 {
841 ttn_move_dest(b, dest,
842 ttn_channel(b, nir_fadd(b, nir_fdot2(b, src[0], src[1]),
843 src[2]),
844 X));
845 }
846
847 static void
848 ttn_dp2(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
849 {
850 ttn_move_dest(b, dest, nir_fdot2(b, src[0], src[1]));
851 }
852
853 static void
854 ttn_dp3(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
855 {
856 ttn_move_dest(b, dest, nir_fdot3(b, src[0], src[1]));
857 }
858
859 static void
860 ttn_dp4(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
861 {
862 ttn_move_dest(b, dest, nir_fdot4(b, src[0], src[1]));
863 }
864
865 static void
866 ttn_dph(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
867 {
868 ttn_move_dest(b, dest, nir_fadd(b, nir_fdot3(b, src[0], src[1]),
869 ttn_channel(b, src[1], W)));
870 }
871
872 static void
873 ttn_umad(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
874 {
875 ttn_move_dest(b, dest, nir_iadd(b, nir_imul(b, src[0], src[1]), src[2]));
876 }
877
878 static void
879 ttn_arr(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
880 {
881 ttn_move_dest(b, dest, nir_ffloor(b, nir_fadd(b, src[0], nir_imm_float(b, 0.5))));
882 }
883
884 static void
885 ttn_cmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
886 {
887 ttn_move_dest(b, dest, nir_bcsel(b,
888 nir_flt(b, src[0], nir_imm_float(b, 0.0)),
889 src[1], src[2]));
890 }
891
892 static void
893 ttn_ucmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
894 {
895 ttn_move_dest(b, dest, nir_bcsel(b,
896 nir_ine(b, src[0], nir_imm_int(b, 0)),
897 src[1], src[2]));
898 }
899
900 static void
901 ttn_kill(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
902 {
903 nir_intrinsic_instr *discard =
904 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
905 nir_instr_insert_after_cf_list(b->cf_node_list, &discard->instr);
906 }
907
908 static void
909 ttn_kill_if(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
910 {
911 nir_ssa_def *cmp = nir_bany4(b, nir_flt(b, src[0], nir_imm_float(b, 0.0)));
912 nir_intrinsic_instr *discard =
913 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard_if);
914 discard->src[0] = nir_src_for_ssa(cmp);
915 nir_instr_insert_after_cf_list(b->cf_node_list, &discard->instr);
916 }
917
918 static void
919 ttn_if(struct ttn_compile *c, nir_ssa_def *src, bool is_uint)
920 {
921 nir_builder *b = &c->build;
922
923 /* Save the outside-of-the-if-statement node list. */
924 c->if_stack[c->if_stack_pos] = b->cf_node_list;
925 c->if_stack_pos++;
926
927 src = ttn_channel(b, src, X);
928
929 nir_if *if_stmt = nir_if_create(b->shader);
930 if (is_uint) {
931 if_stmt->condition = nir_src_for_ssa(nir_ine(b, src, nir_imm_int(b, 0)));
932 } else {
933 if_stmt->condition = nir_src_for_ssa(nir_fne(b, src, nir_imm_int(b, 0)));
934 }
935 nir_cf_node_insert_end(b->cf_node_list, &if_stmt->cf_node);
936
937 nir_builder_insert_after_cf_list(b, &if_stmt->then_list);
938
939 c->if_stack[c->if_stack_pos] = &if_stmt->else_list;
940 c->if_stack_pos++;
941 }
942
943 static void
944 ttn_else(struct ttn_compile *c)
945 {
946 nir_builder *b = &c->build;
947
948 nir_builder_insert_after_cf_list(b, c->if_stack[c->if_stack_pos - 1]);
949 }
950
951 static void
952 ttn_endif(struct ttn_compile *c)
953 {
954 nir_builder *b = &c->build;
955
956 c->if_stack_pos -= 2;
957 nir_builder_insert_after_cf_list(b, c->if_stack[c->if_stack_pos]);
958 }
959
960 static void
961 ttn_bgnloop(struct ttn_compile *c)
962 {
963 nir_builder *b = &c->build;
964
965 /* Save the outside-of-the-loop node list. */
966 c->loop_stack[c->loop_stack_pos] = b->cf_node_list;
967 c->loop_stack_pos++;
968
969 nir_loop *loop = nir_loop_create(b->shader);
970 nir_cf_node_insert_end(b->cf_node_list, &loop->cf_node);
971
972 nir_builder_insert_after_cf_list(b, &loop->body);
973 }
974
975 static void
976 ttn_cont(nir_builder *b)
977 {
978 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_continue);
979 nir_instr_insert_after_cf_list(b->cf_node_list, &instr->instr);
980 }
981
982 static void
983 ttn_brk(nir_builder *b)
984 {
985 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
986 nir_instr_insert_after_cf_list(b->cf_node_list, &instr->instr);
987 }
988
989 static void
990 ttn_endloop(struct ttn_compile *c)
991 {
992 nir_builder *b = &c->build;
993
994 c->loop_stack_pos--;
995 nir_builder_insert_after_cf_list(b, c->loop_stack[c->loop_stack_pos]);
996 }
997
998 static void
999 setup_texture_info(nir_tex_instr *instr, unsigned texture)
1000 {
1001 switch (texture) {
1002 case TGSI_TEXTURE_1D:
1003 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1004 break;
1005 case TGSI_TEXTURE_1D_ARRAY:
1006 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1007 instr->is_array = true;
1008 break;
1009 case TGSI_TEXTURE_SHADOW1D:
1010 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1011 instr->is_shadow = true;
1012 break;
1013 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1014 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1015 instr->is_shadow = true;
1016 instr->is_array = true;
1017 break;
1018 case TGSI_TEXTURE_2D:
1019 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1020 break;
1021 case TGSI_TEXTURE_2D_ARRAY:
1022 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1023 instr->is_array = true;
1024 break;
1025 case TGSI_TEXTURE_2D_MSAA:
1026 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1027 break;
1028 case TGSI_TEXTURE_2D_ARRAY_MSAA:
1029 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1030 instr->is_array = true;
1031 break;
1032 case TGSI_TEXTURE_SHADOW2D:
1033 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1034 instr->is_shadow = true;
1035 break;
1036 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1037 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1038 instr->is_shadow = true;
1039 instr->is_array = true;
1040 break;
1041 case TGSI_TEXTURE_3D:
1042 instr->sampler_dim = GLSL_SAMPLER_DIM_3D;
1043 break;
1044 case TGSI_TEXTURE_CUBE:
1045 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1046 break;
1047 case TGSI_TEXTURE_CUBE_ARRAY:
1048 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1049 instr->is_array = true;
1050 break;
1051 case TGSI_TEXTURE_SHADOWCUBE:
1052 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1053 instr->is_shadow = true;
1054 break;
1055 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
1056 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1057 instr->is_shadow = true;
1058 instr->is_array = true;
1059 break;
1060 case TGSI_TEXTURE_RECT:
1061 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1062 break;
1063 case TGSI_TEXTURE_SHADOWRECT:
1064 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1065 instr->is_shadow = true;
1066 break;
1067 default:
1068 fprintf(stderr, "Unknown TGSI texture target %d\n", texture);
1069 abort();
1070 }
1071 }
1072
1073 static void
1074 ttn_tex(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1075 {
1076 nir_builder *b = &c->build;
1077 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1078 nir_tex_instr *instr;
1079 nir_texop op;
1080 unsigned num_srcs, samp = 1, sview, i;
1081
1082 switch (tgsi_inst->Instruction.Opcode) {
1083 case TGSI_OPCODE_TEX:
1084 op = nir_texop_tex;
1085 num_srcs = 1;
1086 break;
1087 case TGSI_OPCODE_TXP:
1088 op = nir_texop_tex;
1089 num_srcs = 2;
1090 break;
1091 case TGSI_OPCODE_TXB:
1092 op = nir_texop_txb;
1093 num_srcs = 2;
1094 break;
1095 case TGSI_OPCODE_TXB2:
1096 op = nir_texop_txb;
1097 num_srcs = 2;
1098 samp = 2;
1099 break;
1100 case TGSI_OPCODE_TXL:
1101 op = nir_texop_txl;
1102 num_srcs = 2;
1103 break;
1104 case TGSI_OPCODE_TXL2:
1105 op = nir_texop_txl;
1106 num_srcs = 2;
1107 samp = 2;
1108 break;
1109 case TGSI_OPCODE_TXF:
1110 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
1111 tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA) {
1112 op = nir_texop_txf_ms;
1113 } else {
1114 op = nir_texop_txf;
1115 }
1116 num_srcs = 2;
1117 break;
1118 case TGSI_OPCODE_TXD:
1119 op = nir_texop_txd;
1120 num_srcs = 3;
1121 samp = 3;
1122 break;
1123
1124 default:
1125 fprintf(stderr, "unknown TGSI tex op %d\n", tgsi_inst->Instruction.Opcode);
1126 abort();
1127 }
1128
1129 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
1130 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
1131 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
1132 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
1133 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
1134 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
1135 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
1136 num_srcs++;
1137 }
1138
1139 num_srcs += tgsi_inst->Texture.NumOffsets;
1140
1141 instr = nir_tex_instr_create(b->shader, num_srcs);
1142 instr->op = op;
1143
1144 setup_texture_info(instr, tgsi_inst->Texture.Texture);
1145
1146 switch (instr->sampler_dim) {
1147 case GLSL_SAMPLER_DIM_1D:
1148 case GLSL_SAMPLER_DIM_BUF:
1149 instr->coord_components = 1;
1150 break;
1151 case GLSL_SAMPLER_DIM_2D:
1152 case GLSL_SAMPLER_DIM_RECT:
1153 case GLSL_SAMPLER_DIM_EXTERNAL:
1154 case GLSL_SAMPLER_DIM_MS:
1155 instr->coord_components = 2;
1156 break;
1157 case GLSL_SAMPLER_DIM_3D:
1158 case GLSL_SAMPLER_DIM_CUBE:
1159 instr->coord_components = 3;
1160 break;
1161 }
1162
1163 if (instr->is_array)
1164 instr->coord_components++;
1165
1166 assert(tgsi_inst->Src[samp].Register.File == TGSI_FILE_SAMPLER);
1167 instr->sampler_index = tgsi_inst->Src[samp].Register.Index;
1168
1169 /* TODO: if we supported any opcodes that take an explicit SVIEW
1170 * src, we would use that here instead. But for the "legacy"
1171 * texture opcodes the SVIEW index is the same as the SAMP index:
1172 */
1173 sview = instr->sampler_index;
1174
1175 if (sview < c->num_samp_types) {
1176 instr->dest_type = c->samp_types[sview];
1177 } else {
1178 instr->dest_type = nir_type_float;
1179 }
1180
1181 unsigned src_number = 0;
1182
1183 instr->src[src_number].src =
1184 nir_src_for_ssa(nir_swizzle(b, src[0], SWIZ(X, Y, Z, W),
1185 instr->coord_components, false));
1186 instr->src[src_number].src_type = nir_tex_src_coord;
1187 src_number++;
1188
1189 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1190 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1191 instr->src[src_number].src_type = nir_tex_src_projector;
1192 src_number++;
1193 }
1194
1195 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB) {
1196 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1197 instr->src[src_number].src_type = nir_tex_src_bias;
1198 src_number++;
1199 }
1200
1201 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
1202 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1203 instr->src[src_number].src_type = nir_tex_src_bias;
1204 src_number++;
1205 }
1206
1207 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
1208 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1209 instr->src[src_number].src_type = nir_tex_src_lod;
1210 src_number++;
1211 }
1212
1213 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
1214 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1215 instr->src[src_number].src_type = nir_tex_src_lod;
1216 src_number++;
1217 }
1218
1219 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
1220 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1221 if (op == nir_texop_txf_ms)
1222 instr->src[src_number].src_type = nir_tex_src_ms_index;
1223 else
1224 instr->src[src_number].src_type = nir_tex_src_lod;
1225 src_number++;
1226 }
1227
1228 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
1229 instr->src[src_number].src =
1230 nir_src_for_ssa(nir_swizzle(b, src[1], SWIZ(X, Y, Z, W),
1231 instr->coord_components, false));
1232 instr->src[src_number].src_type = nir_tex_src_ddx;
1233 src_number++;
1234 instr->src[src_number].src =
1235 nir_src_for_ssa(nir_swizzle(b, src[2], SWIZ(X, Y, Z, W),
1236 instr->coord_components, false));
1237 instr->src[src_number].src_type = nir_tex_src_ddy;
1238 src_number++;
1239 }
1240
1241 if (instr->is_shadow) {
1242 if (instr->coord_components < 3)
1243 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], Z));
1244 else
1245 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1246
1247 instr->src[src_number].src_type = nir_tex_src_comparitor;
1248 src_number++;
1249 }
1250
1251 for (i = 0; i < tgsi_inst->Texture.NumOffsets; i++) {
1252 struct tgsi_texture_offset *tex_offset = &tgsi_inst->TexOffsets[i];
1253 /* since TexOffset isn't using tgsi_full_src_register we get to
1254 * do some extra gymnastics:
1255 */
1256 nir_alu_src src;
1257
1258 memset(&src, 0, sizeof(src));
1259
1260 src.src = ttn_src_for_file_and_index(c,
1261 tex_offset->File,
1262 tex_offset->Index,
1263 NULL, NULL, NULL);
1264
1265 src.swizzle[0] = tex_offset->SwizzleX;
1266 src.swizzle[1] = tex_offset->SwizzleY;
1267 src.swizzle[2] = tex_offset->SwizzleZ;
1268 src.swizzle[3] = TGSI_SWIZZLE_W;
1269
1270 instr->src[src_number].src_type = nir_tex_src_offset;
1271 instr->src[src_number].src = nir_src_for_ssa(
1272 nir_fmov_alu(b, src, nir_tex_instr_src_size(instr, src_number)));
1273 src_number++;
1274 }
1275
1276 assert(src_number == num_srcs);
1277
1278 nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
1279 nir_instr_insert_after_cf_list(b->cf_node_list, &instr->instr);
1280
1281 /* Resolve the writemask on the texture op. */
1282 ttn_move_dest(b, dest, &instr->dest.ssa);
1283 }
1284
1285 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1286 *
1287 * dst.x = texture_width(unit, lod)
1288 * dst.y = texture_height(unit, lod)
1289 * dst.z = texture_depth(unit, lod)
1290 * dst.w = texture_levels(unit)
1291 *
1292 * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
1293 */
1294 static void
1295 ttn_txq(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1296 {
1297 nir_builder *b = &c->build;
1298 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1299 nir_tex_instr *txs, *qlv;
1300
1301 txs = nir_tex_instr_create(b->shader, 1);
1302 txs->op = nir_texop_txs;
1303 setup_texture_info(txs, tgsi_inst->Texture.Texture);
1304
1305 qlv = nir_tex_instr_create(b->shader, 0);
1306 qlv->op = nir_texop_query_levels;
1307 setup_texture_info(qlv, tgsi_inst->Texture.Texture);
1308
1309 assert(tgsi_inst->Src[1].Register.File == TGSI_FILE_SAMPLER);
1310 txs->sampler_index = tgsi_inst->Src[1].Register.Index;
1311 qlv->sampler_index = tgsi_inst->Src[1].Register.Index;
1312
1313 /* only a single src, the lod: */
1314 txs->src[0].src = nir_src_for_ssa(ttn_channel(b, src[0], X));
1315 txs->src[0].src_type = nir_tex_src_lod;
1316
1317 nir_ssa_dest_init(&txs->instr, &txs->dest, 3, NULL);
1318 nir_instr_insert_after_cf_list(b->cf_node_list, &txs->instr);
1319
1320 nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, NULL);
1321 nir_instr_insert_after_cf_list(b->cf_node_list, &qlv->instr);
1322
1323 ttn_move_dest_masked(b, dest, &txs->dest.ssa, TGSI_WRITEMASK_XYZ);
1324 ttn_move_dest_masked(b, dest, &qlv->dest.ssa, TGSI_WRITEMASK_W);
1325 }
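/*
 * Illustrative example (assumed TGSI text): a single
 *
 *    TXQ TEMP[0], TEMP[1].xxxx, SAMP[0], 2D
 *
 * is split by ttn_txq() above into a txs instruction (fed the lod from
 * TEMP[1].x, writing dst.xyz) and a query_levels instruction (writing dst.w),
 * both pointing at sampler index 0.
 */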
1326
1327 static const nir_op op_trans[TGSI_OPCODE_LAST] = {
1328 [TGSI_OPCODE_ARL] = 0,
1329 [TGSI_OPCODE_MOV] = nir_op_fmov,
1330 [TGSI_OPCODE_LIT] = 0,
1331 [TGSI_OPCODE_RCP] = nir_op_frcp,
1332 [TGSI_OPCODE_RSQ] = nir_op_frsq,
1333 [TGSI_OPCODE_EXP] = 0,
1334 [TGSI_OPCODE_LOG] = 0,
1335 [TGSI_OPCODE_MUL] = nir_op_fmul,
1336 [TGSI_OPCODE_ADD] = nir_op_fadd,
1337 [TGSI_OPCODE_DP3] = 0,
1338 [TGSI_OPCODE_DP4] = 0,
1339 [TGSI_OPCODE_DST] = 0,
1340 [TGSI_OPCODE_MIN] = nir_op_fmin,
1341 [TGSI_OPCODE_MAX] = nir_op_fmax,
1342 [TGSI_OPCODE_SLT] = nir_op_slt,
1343 [TGSI_OPCODE_SGE] = nir_op_sge,
1344 [TGSI_OPCODE_MAD] = nir_op_ffma,
1345 [TGSI_OPCODE_SUB] = nir_op_fsub,
1346 [TGSI_OPCODE_LRP] = 0,
1347 [TGSI_OPCODE_SQRT] = nir_op_fsqrt,
1348 [TGSI_OPCODE_DP2A] = 0,
1349 [TGSI_OPCODE_FRC] = nir_op_ffract,
1350 [TGSI_OPCODE_CLAMP] = 0,
1351 [TGSI_OPCODE_FLR] = nir_op_ffloor,
1352 [TGSI_OPCODE_ROUND] = nir_op_fround_even,
1353 [TGSI_OPCODE_EX2] = nir_op_fexp2,
1354 [TGSI_OPCODE_LG2] = nir_op_flog2,
1355 [TGSI_OPCODE_POW] = nir_op_fpow,
1356 [TGSI_OPCODE_XPD] = 0,
1357 [TGSI_OPCODE_ABS] = nir_op_fabs,
1358 [TGSI_OPCODE_DPH] = 0,
1359 [TGSI_OPCODE_COS] = nir_op_fcos,
1360 [TGSI_OPCODE_DDX] = nir_op_fddx,
1361 [TGSI_OPCODE_DDY] = nir_op_fddy,
1362 [TGSI_OPCODE_KILL] = 0,
1363 [TGSI_OPCODE_PK2H] = 0, /* XXX */
1364 [TGSI_OPCODE_PK2US] = 0, /* XXX */
1365 [TGSI_OPCODE_PK4B] = 0, /* XXX */
1366 [TGSI_OPCODE_PK4UB] = 0, /* XXX */
1367 [TGSI_OPCODE_SEQ] = nir_op_seq,
1368 [TGSI_OPCODE_SGT] = 0,
1369 [TGSI_OPCODE_SIN] = nir_op_fsin,
1370 [TGSI_OPCODE_SNE] = nir_op_sne,
1371 [TGSI_OPCODE_SLE] = 0,
1372 [TGSI_OPCODE_TEX] = 0,
1373 [TGSI_OPCODE_TXD] = 0,
1374 [TGSI_OPCODE_TXP] = 0,
1375 [TGSI_OPCODE_UP2H] = 0, /* XXX */
1376 [TGSI_OPCODE_UP2US] = 0, /* XXX */
1377 [TGSI_OPCODE_UP4B] = 0, /* XXX */
1378 [TGSI_OPCODE_UP4UB] = 0, /* XXX */
1379 [TGSI_OPCODE_ARR] = 0,
1380
1381 /* No function calls, yet. */
1382 [TGSI_OPCODE_CAL] = 0, /* XXX */
1383 [TGSI_OPCODE_RET] = 0, /* XXX */
1384
1385 [TGSI_OPCODE_SSG] = nir_op_fsign,
1386 [TGSI_OPCODE_CMP] = 0,
1387 [TGSI_OPCODE_SCS] = 0,
1388 [TGSI_OPCODE_TXB] = 0,
1389 [TGSI_OPCODE_DIV] = nir_op_fdiv,
1390 [TGSI_OPCODE_DP2] = 0,
1391 [TGSI_OPCODE_DP2A] = 0,
1392 [TGSI_OPCODE_TXL] = 0,
1393
1394 [TGSI_OPCODE_BRK] = 0,
1395 [TGSI_OPCODE_IF] = 0,
1396 [TGSI_OPCODE_UIF] = 0,
1397 [TGSI_OPCODE_ELSE] = 0,
1398 [TGSI_OPCODE_ENDIF] = 0,
1399
1400 [TGSI_OPCODE_DDX_FINE] = nir_op_fddx_fine,
1401 [TGSI_OPCODE_DDY_FINE] = nir_op_fddy_fine,
1402
1403 [TGSI_OPCODE_PUSHA] = 0, /* XXX */
1404 [TGSI_OPCODE_POPA] = 0, /* XXX */
1405
1406 [TGSI_OPCODE_CEIL] = nir_op_fceil,
1407 [TGSI_OPCODE_I2F] = nir_op_i2f,
1408 [TGSI_OPCODE_NOT] = nir_op_inot,
1409 [TGSI_OPCODE_TRUNC] = nir_op_ftrunc,
1410 [TGSI_OPCODE_SHL] = nir_op_ishl,
1411 [TGSI_OPCODE_AND] = nir_op_iand,
1412 [TGSI_OPCODE_OR] = nir_op_ior,
1413 [TGSI_OPCODE_MOD] = nir_op_umod,
1414 [TGSI_OPCODE_XOR] = nir_op_ixor,
1415 [TGSI_OPCODE_SAD] = 0, /* XXX */
1416 [TGSI_OPCODE_TXF] = 0,
1417 [TGSI_OPCODE_TXQ] = 0,
1418
1419 [TGSI_OPCODE_CONT] = 0,
1420
1421 [TGSI_OPCODE_EMIT] = 0, /* XXX */
1422 [TGSI_OPCODE_ENDPRIM] = 0, /* XXX */
1423
1424 [TGSI_OPCODE_BGNLOOP] = 0,
1425 [TGSI_OPCODE_BGNSUB] = 0, /* XXX: no function calls */
1426 [TGSI_OPCODE_ENDLOOP] = 0,
1427 [TGSI_OPCODE_ENDSUB] = 0, /* XXX: no function calls */
1428
1429 [TGSI_OPCODE_TXQ_LZ] = 0,
1430 [TGSI_OPCODE_NOP] = 0,
1431 [TGSI_OPCODE_FSEQ] = nir_op_feq,
1432 [TGSI_OPCODE_FSGE] = nir_op_fge,
1433 [TGSI_OPCODE_FSLT] = nir_op_flt,
1434 [TGSI_OPCODE_FSNE] = nir_op_fne,
1435
1436 /* No control flow yet */
1437 [TGSI_OPCODE_CALLNZ] = 0, /* XXX */
1438 [TGSI_OPCODE_BREAKC] = 0, /* not emitted by glsl_to_tgsi.cpp */
1439
1440 [TGSI_OPCODE_KILL_IF] = 0,
1441
1442 [TGSI_OPCODE_END] = 0,
1443
1444 [TGSI_OPCODE_F2I] = nir_op_f2i,
1445 [TGSI_OPCODE_IDIV] = nir_op_idiv,
1446 [TGSI_OPCODE_IMAX] = nir_op_imax,
1447 [TGSI_OPCODE_IMIN] = nir_op_imin,
1448 [TGSI_OPCODE_INEG] = nir_op_ineg,
1449 [TGSI_OPCODE_ISGE] = nir_op_ige,
1450 [TGSI_OPCODE_ISHR] = nir_op_ishr,
1451 [TGSI_OPCODE_ISLT] = nir_op_ilt,
1452 [TGSI_OPCODE_F2U] = nir_op_f2u,
1453 [TGSI_OPCODE_U2F] = nir_op_u2f,
1454 [TGSI_OPCODE_UADD] = nir_op_iadd,
1455 [TGSI_OPCODE_UDIV] = nir_op_udiv,
1456 [TGSI_OPCODE_UMAD] = 0,
1457 [TGSI_OPCODE_UMAX] = nir_op_umax,
1458 [TGSI_OPCODE_UMIN] = nir_op_umin,
1459 [TGSI_OPCODE_UMOD] = nir_op_umod,
1460 [TGSI_OPCODE_UMUL] = nir_op_imul,
1461 [TGSI_OPCODE_USEQ] = nir_op_ieq,
1462 [TGSI_OPCODE_USGE] = nir_op_uge,
1463 [TGSI_OPCODE_USHR] = nir_op_ushr,
1464 [TGSI_OPCODE_USLT] = nir_op_ult,
1465 [TGSI_OPCODE_USNE] = nir_op_ine,
1466
1467 [TGSI_OPCODE_SWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1468 [TGSI_OPCODE_CASE] = 0, /* not emitted by glsl_to_tgsi.cpp */
1469 [TGSI_OPCODE_DEFAULT] = 0, /* not emitted by glsl_to_tgsi.cpp */
1470 [TGSI_OPCODE_ENDSWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1471
1472 /* XXX: SAMPLE opcodes */
1473
1474 [TGSI_OPCODE_UARL] = nir_op_imov,
1475 [TGSI_OPCODE_UCMP] = 0,
1476 [TGSI_OPCODE_IABS] = nir_op_iabs,
1477 [TGSI_OPCODE_ISSG] = nir_op_isign,
1478
1479 /* XXX: atomics */
1480
1481 [TGSI_OPCODE_TEX2] = 0,
1482 [TGSI_OPCODE_TXB2] = 0,
1483 [TGSI_OPCODE_TXL2] = 0,
1484
1485 [TGSI_OPCODE_IMUL_HI] = nir_op_imul_high,
1486 [TGSI_OPCODE_UMUL_HI] = nir_op_umul_high,
1487
1488 [TGSI_OPCODE_TG4] = 0,
1489 [TGSI_OPCODE_LODQ] = 0, /* XXX */
1490
1491 [TGSI_OPCODE_IBFE] = nir_op_ibitfield_extract,
1492 [TGSI_OPCODE_UBFE] = nir_op_ubitfield_extract,
1493 [TGSI_OPCODE_BFI] = nir_op_bitfield_insert,
1494 [TGSI_OPCODE_BREV] = nir_op_bitfield_reverse,
1495 [TGSI_OPCODE_POPC] = nir_op_bit_count,
1496 [TGSI_OPCODE_LSB] = nir_op_find_lsb,
1497 [TGSI_OPCODE_IMSB] = nir_op_ifind_msb,
1498 [TGSI_OPCODE_UMSB] = nir_op_ifind_msb, /* XXX: signed vs unsigned */
1499
1500 [TGSI_OPCODE_INTERP_CENTROID] = 0, /* XXX */
1501 [TGSI_OPCODE_INTERP_SAMPLE] = 0, /* XXX */
1502 [TGSI_OPCODE_INTERP_OFFSET] = 0, /* XXX */
1503 };
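/*
 * A non-zero entry above means the TGSI opcode maps directly onto a NIR ALU
 * opcode and is emitted through ttn_alu() in the default case of
 * ttn_emit_instruction(); for example, TGSI_OPCODE_ADD becomes a plain
 * nir_op_fadd. A zero entry means the opcode either has a dedicated ttn_*()
 * helper (texturing, LIT, control flow, ...) or is not supported yet.
 */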
1504
1505 static void
1506 ttn_emit_instruction(struct ttn_compile *c)
1507 {
1508 nir_builder *b = &c->build;
1509 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1510 unsigned i;
1511 unsigned tgsi_op = tgsi_inst->Instruction.Opcode;
1512 struct tgsi_full_dst_register *tgsi_dst = &tgsi_inst->Dst[0];
1513
1514 if (tgsi_op == TGSI_OPCODE_END)
1515 return;
1516
1517 nir_ssa_def *src[TGSI_FULL_MAX_SRC_REGISTERS];
1518 for (i = 0; i < tgsi_inst->Instruction.NumSrcRegs; i++) {
1519 src[i] = ttn_get_src(c, &tgsi_inst->Src[i]);
1520 }
1521 nir_alu_dest dest = ttn_get_dest(c, tgsi_dst);
1522
1523 switch (tgsi_op) {
1524 case TGSI_OPCODE_RSQ:
1525 ttn_move_dest(b, dest, nir_frsq(b, ttn_channel(b, src[0], X)));
1526 break;
1527
1528 case TGSI_OPCODE_SQRT:
1529 ttn_move_dest(b, dest, nir_fsqrt(b, ttn_channel(b, src[0], X)));
1530 break;
1531
1532 case TGSI_OPCODE_RCP:
1533 ttn_move_dest(b, dest, nir_frcp(b, ttn_channel(b, src[0], X)));
1534 break;
1535
1536 case TGSI_OPCODE_EX2:
1537 ttn_move_dest(b, dest, nir_fexp2(b, ttn_channel(b, src[0], X)));
1538 break;
1539
1540 case TGSI_OPCODE_LG2:
1541 ttn_move_dest(b, dest, nir_flog2(b, ttn_channel(b, src[0], X)));
1542 break;
1543
1544 case TGSI_OPCODE_POW:
1545 ttn_move_dest(b, dest, nir_fpow(b,
1546 ttn_channel(b, src[0], X),
1547 ttn_channel(b, src[1], X)));
1548 break;
1549
1550 case TGSI_OPCODE_COS:
1551 ttn_move_dest(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)));
1552 break;
1553
1554 case TGSI_OPCODE_SIN:
1555 ttn_move_dest(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)));
1556 break;
1557
1558 case TGSI_OPCODE_ARL:
1559 ttn_arl(b, op_trans[tgsi_op], dest, src);
1560 break;
1561
1562 case TGSI_OPCODE_EXP:
1563 ttn_exp(b, op_trans[tgsi_op], dest, src);
1564 break;
1565
1566 case TGSI_OPCODE_LOG:
1567 ttn_log(b, op_trans[tgsi_op], dest, src);
1568 break;
1569
1570 case TGSI_OPCODE_DST:
1571 ttn_dst(b, op_trans[tgsi_op], dest, src);
1572 break;
1573
1574 case TGSI_OPCODE_LIT:
1575 ttn_lit(b, op_trans[tgsi_op], dest, src);
1576 break;
1577
1578 case TGSI_OPCODE_CLAMP:
1579 ttn_clamp(b, op_trans[tgsi_op], dest, src);
1580 break;
1581
1582 case TGSI_OPCODE_XPD:
1583 ttn_xpd(b, op_trans[tgsi_op], dest, src);
1584 break;
1585
1586 case TGSI_OPCODE_DP2:
1587 ttn_dp2(b, op_trans[tgsi_op], dest, src);
1588 break;
1589
1590 case TGSI_OPCODE_DP3:
1591 ttn_dp3(b, op_trans[tgsi_op], dest, src);
1592 break;
1593
1594 case TGSI_OPCODE_DP4:
1595 ttn_dp4(b, op_trans[tgsi_op], dest, src);
1596 break;
1597
1598 case TGSI_OPCODE_DP2A:
1599 ttn_dp2a(b, op_trans[tgsi_op], dest, src);
1600 break;
1601
1602 case TGSI_OPCODE_DPH:
1603 ttn_dph(b, op_trans[tgsi_op], dest, src);
1604 break;
1605
1606 case TGSI_OPCODE_UMAD:
1607 ttn_umad(b, op_trans[tgsi_op], dest, src);
1608 break;
1609
1610 case TGSI_OPCODE_LRP:
1611 ttn_move_dest(b, dest, nir_flrp(b, src[2], src[1], src[0]));
1612 break;
1613
1614 case TGSI_OPCODE_KILL:
1615 ttn_kill(b, op_trans[tgsi_op], dest, src);
1616 break;
1617
1618 case TGSI_OPCODE_ARR:
1619 ttn_arr(b, op_trans[tgsi_op], dest, src);
1620 break;
1621
1622 case TGSI_OPCODE_CMP:
1623 ttn_cmp(b, op_trans[tgsi_op], dest, src);
1624 break;
1625
1626 case TGSI_OPCODE_UCMP:
1627 ttn_ucmp(b, op_trans[tgsi_op], dest, src);
1628 break;
1629
1630 case TGSI_OPCODE_SCS:
1631 ttn_scs(b, op_trans[tgsi_op], dest, src);
1632 break;
1633
1634 case TGSI_OPCODE_SGT:
1635 ttn_sgt(b, op_trans[tgsi_op], dest, src);
1636 break;
1637
1638 case TGSI_OPCODE_SLE:
1639 ttn_sle(b, op_trans[tgsi_op], dest, src);
1640 break;
1641
1642 case TGSI_OPCODE_KILL_IF:
1643 ttn_kill_if(b, op_trans[tgsi_op], dest, src);
1644 break;
1645
1646 case TGSI_OPCODE_TEX:
1647 case TGSI_OPCODE_TXP:
1648 case TGSI_OPCODE_TXL:
1649 case TGSI_OPCODE_TXB:
1650 case TGSI_OPCODE_TXD:
1651 case TGSI_OPCODE_TXL2:
1652 case TGSI_OPCODE_TXB2:
1653 case TGSI_OPCODE_TXQ_LZ:
1654 case TGSI_OPCODE_TXF:
1655 case TGSI_OPCODE_TG4:
1656 ttn_tex(c, dest, src);
1657 break;
1658
1659 case TGSI_OPCODE_TXQ:
1660 ttn_txq(c, dest, src);
1661 break;
1662
1663 case TGSI_OPCODE_NOP:
1664 break;
1665
1666 case TGSI_OPCODE_IF:
1667 ttn_if(c, src[0], false);
1668 break;
1669
1670 case TGSI_OPCODE_UIF:
1671 ttn_if(c, src[0], true);
1672 break;
1673
1674 case TGSI_OPCODE_ELSE:
1675 ttn_else(c);
1676 break;
1677
1678 case TGSI_OPCODE_ENDIF:
1679 ttn_endif(c);
1680 break;
1681
1682 case TGSI_OPCODE_BGNLOOP:
1683 ttn_bgnloop(c);
1684 break;
1685
1686 case TGSI_OPCODE_BRK:
1687 ttn_brk(b);
1688 break;
1689
1690 case TGSI_OPCODE_CONT:
1691 ttn_cont(b);
1692 break;
1693
1694 case TGSI_OPCODE_ENDLOOP:
1695 ttn_endloop(c);
1696 break;
1697
1698 default:
1699 if (op_trans[tgsi_op] != 0 || tgsi_op == TGSI_OPCODE_MOV) {
1700 ttn_alu(b, op_trans[tgsi_op], dest, src);
1701 } else {
1702 fprintf(stderr, "unknown TGSI opcode: %s\n",
1703 tgsi_get_opcode_name(tgsi_op));
1704 abort();
1705 }
1706 break;
1707 }
1708
1709 if (tgsi_inst->Instruction.Saturate) {
1710 assert(!dest.dest.is_ssa);
1711 ttn_move_dest(b, dest, nir_fsat(b, ttn_src_for_dest(b, &dest)));
1712 }
1713
1714 /* if the dst has a matching var, append a store_var to move the
1715 * output from reg to var
1716 */
1717 nir_variable *var = ttn_get_var(c, tgsi_dst);
1718 if (var) {
1719 unsigned index = tgsi_dst->Register.Index;
1720 unsigned offset = c->temp_regs[index].offset;
1721 nir_intrinsic_instr *store =
1722 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
1723 struct tgsi_ind_register *indirect = tgsi_dst->Register.Indirect ?
1724 &tgsi_dst->Indirect : NULL;
1725
1726 store->num_components = 4;
1727 store->variables[0] = ttn_array_deref(c, store, var, offset, indirect);
1728 store->src[0] = nir_src_for_reg(dest.dest.reg.reg);
1729
1730 nir_instr_insert_after_cf_list(b->cf_node_list, &store->instr);
1731 }
1732 }
1733
1734 /**
1735 * Emits a NIR store_output intrinsic for each TGSI_FILE_OUTPUT value to write
1736 * the staging registers back to the real outputs at the end of the shader.
1737 *
1738 * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
1739 * written, because there's no output load intrinsic, which means we couldn't
1740 * handle writemasks.
1741 */
1742 static void
1743 ttn_add_output_stores(struct ttn_compile *c)
1744 {
1745 nir_builder *b = &c->build;
1746
1747 foreach_list_typed(nir_variable, var, node, &b->shader->outputs) {
1748 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1749 unsigned i;
1750
1751 for (i = 0; i < array_len; i++) {
1752 nir_intrinsic_instr *store =
1753 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_output);
1754 unsigned loc = var->data.driver_location + i;
1755 store->num_components = 4;
1756 store->const_index[0] = loc;
1757 store->src[0].reg.reg = c->output_regs[loc].reg;
1758 store->src[0].reg.base_offset = c->output_regs[loc].offset;
1759 nir_instr_insert_after_cf_list(b->cf_node_list, &store->instr);
1760 }
1761 }
1762 }
1763
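/*
 * Usage sketch (illustrative; "tokens" and "screen->compiler_options" are
 * placeholders for whatever the calling driver has on hand):
 *
 *    struct nir_shader *s = tgsi_to_nir(tokens, screen->compiler_options);
 *    ...use or further lower the shader...
 *    ralloc_free(s);
 *
 * The returned shader is created with a NULL ralloc parent, so the caller
 * owns it.
 */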
1764 struct nir_shader *
1765 tgsi_to_nir(const void *tgsi_tokens,
1766 const nir_shader_compiler_options *options)
1767 {
1768 struct tgsi_parse_context parser;
1769 struct tgsi_shader_info scan;
1770 struct ttn_compile *c;
1771 struct nir_shader *s;
1772 int ret;
1773
1774 c = rzalloc(NULL, struct ttn_compile);
1775 s = nir_shader_create(NULL, options);
1776
1777 nir_function *func = nir_function_create(s, "main");
1778 nir_function_overload *overload = nir_function_overload_create(func);
1779 nir_function_impl *impl = nir_function_impl_create(overload);
1780
1781 nir_builder_init(&c->build, impl);
1782 nir_builder_insert_after_cf_list(&c->build, &impl->body);
1783
1784 tgsi_scan_shader(tgsi_tokens, &scan);
1785 c->scan = &scan;
1786
1787 s->num_inputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1788 s->num_uniforms = scan.const_file_max[0] + 1;
1789 s->num_outputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1790
1791 c->output_regs = rzalloc_array(c, struct ttn_reg_info,
1792 scan.file_max[TGSI_FILE_OUTPUT] + 1);
1793 c->temp_regs = rzalloc_array(c, struct ttn_reg_info,
1794 scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1795 c->imm_defs = rzalloc_array(c, nir_ssa_def *,
1796 scan.file_max[TGSI_FILE_IMMEDIATE] + 1);
1797
1798 c->num_samp_types = scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
1799 c->samp_types = rzalloc_array(c, nir_alu_type, c->num_samp_types);
1800
1801 c->if_stack = rzalloc_array(c, struct exec_list *,
1802 (scan.opcode_count[TGSI_OPCODE_IF] +
1803 scan.opcode_count[TGSI_OPCODE_UIF]) * 2);
1804 c->loop_stack = rzalloc_array(c, struct exec_list *,
1805 scan.opcode_count[TGSI_OPCODE_BGNLOOP]);
1806
1807 ret = tgsi_parse_init(&parser, tgsi_tokens);
1808 assert(ret == TGSI_PARSE_OK);
1809
1810 while (!tgsi_parse_end_of_tokens(&parser)) {
1811 tgsi_parse_token(&parser);
1812 c->token = &parser.FullToken;
1813
1814 switch (parser.FullToken.Token.Type) {
1815 case TGSI_TOKEN_TYPE_DECLARATION:
1816 ttn_emit_declaration(c);
1817 break;
1818
1819 case TGSI_TOKEN_TYPE_INSTRUCTION:
1820 ttn_emit_instruction(c);
1821 break;
1822
1823 case TGSI_TOKEN_TYPE_IMMEDIATE:
1824 ttn_emit_immediate(c);
1825 break;
1826 }
1827 }
1828
1829 tgsi_parse_free(&parser);
1830
1831 ttn_add_output_stores(c);
1832
1833 ralloc_free(c);
1834 return s;
1835 }