1 /*
2 * Copyright © 2014-2015 Broadcom
3 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #ifdef __GNUC__
26 #pragma GCC diagnostic ignored "-Wdeclaration-after-statement"
27 #endif
28
29 #include "util/ralloc.h"
30 #include "glsl/nir/nir.h"
31 #include "glsl/nir/nir_control_flow.h"
32 #include "glsl/nir/nir_builder.h"
33 #include "glsl/list.h"
34 #include "glsl/nir/shader_enums.h"
35
36 #include "nir/tgsi_to_nir.h"
37 #include "tgsi/tgsi_parse.h"
38 #include "tgsi/tgsi_dump.h"
39 #include "tgsi/tgsi_info.h"
40 #include "tgsi/tgsi_scan.h"
41
42 #define SWIZ(X, Y, Z, W) (unsigned[4]){ \
43 TGSI_SWIZZLE_##X, \
44 TGSI_SWIZZLE_##Y, \
45 TGSI_SWIZZLE_##Z, \
46 TGSI_SWIZZLE_##W, \
47 }
48
49 struct ttn_reg_info {
50 /** nir register containing this TGSI index. */
51 nir_register *reg;
52 nir_variable *var;
53 /** Offset (in vec4s) from the start of var for this TGSI index. */
54 int offset;
55 };
56
57 struct ttn_compile {
58 union tgsi_full_token *token;
59 nir_builder build;
60 struct tgsi_shader_info *scan;
61
62 struct ttn_reg_info *output_regs;
63 struct ttn_reg_info *temp_regs;
64 nir_ssa_def **imm_defs;
65
66 unsigned num_samp_types;
67 nir_alu_type *samp_types;
68
69 nir_register *addr_reg;
70
71 /**
72 * Stack of nir_cursors where instructions should be pushed as we pop
73 * back out of the control flow stack.
74 *
75 * For each IF/ELSE/ENDIF block, if_stack[if_stack_pos] has where the else
76 * instructions should be placed, and if_stack[if_stack_pos - 1] has where
77 * the next instructions outside of the if/then/else block go.
78 */
79 nir_cursor *if_stack;
80 unsigned if_stack_pos;
81
82 /**
83 * Stack of nir_cursors where instructions should be pushed as we pop
84 * back out of the control flow stack.
85 *
86 * loop_stack[loop_stack_pos - 1] contains the cf_node_list for the outside
87 * of the loop.
88 */
89 nir_cursor *loop_stack;
90 unsigned loop_stack_pos;
91
92 /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
93 unsigned next_imm;
94 };
95
96 #define ttn_swizzle(b, src, x, y, z, w) \
97 nir_swizzle(b, src, SWIZ(x, y, z, w), 4, false)
98 #define ttn_channel(b, src, swiz) \
99 nir_swizzle(b, src, SWIZ(swiz, swiz, swiz, swiz), 1, false)
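/* For example, ttn_channel(b, src, W) replicates TGSI_SWIZZLE_W (== 3) into
 * every swizzle slot and yields a single-component value, i.e. a scalar read
 * of src.w, while ttn_swizzle(b, src, Y, Z, X, X) yields the 4-component
 * src.yzxx.
 */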
100
101 static gl_varying_slot
102 tgsi_varying_semantic_to_slot(unsigned semantic, unsigned index)
103 {
104 switch (semantic) {
105 case TGSI_SEMANTIC_POSITION:
106 return VARYING_SLOT_POS;
107 case TGSI_SEMANTIC_COLOR:
108 if (index == 0)
109 return VARYING_SLOT_COL0;
110 else
111 return VARYING_SLOT_COL1;
112 case TGSI_SEMANTIC_BCOLOR:
113 if (index == 0)
114 return VARYING_SLOT_BFC0;
115 else
116 return VARYING_SLOT_BFC1;
117 case TGSI_SEMANTIC_FOG:
118 return VARYING_SLOT_FOGC;
119 case TGSI_SEMANTIC_PSIZE:
120 return VARYING_SLOT_PSIZ;
121 case TGSI_SEMANTIC_GENERIC:
122 return VARYING_SLOT_VAR0 + index;
123 case TGSI_SEMANTIC_FACE:
124 return VARYING_SLOT_FACE;
125 case TGSI_SEMANTIC_EDGEFLAG:
126 return VARYING_SLOT_EDGE;
127 case TGSI_SEMANTIC_PRIMID:
128 return VARYING_SLOT_PRIMITIVE_ID;
129 case TGSI_SEMANTIC_CLIPDIST:
130 if (index == 0)
131 return VARYING_SLOT_CLIP_DIST0;
132 else
133 return VARYING_SLOT_CLIP_DIST1;
134 case TGSI_SEMANTIC_CLIPVERTEX:
135 return VARYING_SLOT_CLIP_VERTEX;
136 case TGSI_SEMANTIC_TEXCOORD:
137 return VARYING_SLOT_TEX0 + index;
138 case TGSI_SEMANTIC_PCOORD:
139 return VARYING_SLOT_PNTC;
140 case TGSI_SEMANTIC_VIEWPORT_INDEX:
141 return VARYING_SLOT_VIEWPORT;
142 case TGSI_SEMANTIC_LAYER:
143 return VARYING_SLOT_LAYER;
144 default:
145 fprintf(stderr, "Bad TGSI semantic: %d/%d\n", semantic, index);
146 abort();
147 }
148 }
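/* For example, a fragment-shader input declared as GENERIC[3] lands in
 * VARYING_SLOT_VAR0 + 3 == VARYING_SLOT_VAR3, and COLOR[1] lands in
 * VARYING_SLOT_COL1, per the switch above.
 */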
149
150 /* Temporary helper to remap back to TGSI style semantic name/index
151 * values, for use in drivers that haven't been converted to using
152 * VARYING_SLOT_
153 */
154 void
155 varying_slot_to_tgsi_semantic(gl_varying_slot slot,
156 unsigned *semantic_name, unsigned *semantic_index)
157 {
158 static const unsigned map[][2] = {
159 [VARYING_SLOT_POS] = { TGSI_SEMANTIC_POSITION, 0 },
160 [VARYING_SLOT_COL0] = { TGSI_SEMANTIC_COLOR, 0 },
161 [VARYING_SLOT_COL1] = { TGSI_SEMANTIC_COLOR, 1 },
162 [VARYING_SLOT_BFC0] = { TGSI_SEMANTIC_BCOLOR, 0 },
163 [VARYING_SLOT_BFC1] = { TGSI_SEMANTIC_BCOLOR, 1 },
164 [VARYING_SLOT_FOGC] = { TGSI_SEMANTIC_FOG, 0 },
165 [VARYING_SLOT_PSIZ] = { TGSI_SEMANTIC_PSIZE, 0 },
166 [VARYING_SLOT_FACE] = { TGSI_SEMANTIC_FACE, 0 },
167 [VARYING_SLOT_EDGE] = { TGSI_SEMANTIC_EDGEFLAG, 0 },
168 [VARYING_SLOT_PRIMITIVE_ID] = { TGSI_SEMANTIC_PRIMID, 0 },
169 [VARYING_SLOT_CLIP_DIST0] = { TGSI_SEMANTIC_CLIPDIST, 0 },
170 [VARYING_SLOT_CLIP_DIST1] = { TGSI_SEMANTIC_CLIPDIST, 1 },
171 [VARYING_SLOT_CLIP_VERTEX] = { TGSI_SEMANTIC_CLIPVERTEX, 0 },
172 [VARYING_SLOT_PNTC] = { TGSI_SEMANTIC_PCOORD, 0 },
173 [VARYING_SLOT_VIEWPORT] = { TGSI_SEMANTIC_VIEWPORT_INDEX, 0 },
174 [VARYING_SLOT_LAYER] = { TGSI_SEMANTIC_LAYER, 0 },
175 };
176
177 if (slot >= VARYING_SLOT_VAR0) {
178 *semantic_name = TGSI_SEMANTIC_GENERIC;
179 *semantic_index = slot - VARYING_SLOT_VAR0;
180 return;
181 }
182
183 if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7) {
184 *semantic_name = TGSI_SEMANTIC_TEXCOORD;
185 *semantic_index = slot - VARYING_SLOT_TEX0;
186 return;
187 }
188
189 if (slot >= ARRAY_SIZE(map)) {
190 fprintf(stderr, "Unknown varying slot %d\n", slot);
191 abort();
192 }
193
194 *semantic_name = map[slot][0];
195 *semantic_index = map[slot][1];
196 }
197
198 /* Temporary helper to remap back to TGSI style semantic name/index
199 * values, for use in drivers that haven't been converted to using
200 * FRAG_RESULT_
201 */
202 void
203 frag_result_to_tgsi_semantic(gl_frag_result slot,
204 unsigned *semantic_name, unsigned *semantic_index)
205 {
206 static const unsigned map[][2] = {
207 [FRAG_RESULT_DEPTH] = { TGSI_SEMANTIC_POSITION, 0 },
208 [FRAG_RESULT_COLOR] = { TGSI_SEMANTIC_COLOR, -1 },
209 [FRAG_RESULT_DATA0 + 0] = { TGSI_SEMANTIC_COLOR, 0 },
210 [FRAG_RESULT_DATA0 + 1] = { TGSI_SEMANTIC_COLOR, 1 },
211 [FRAG_RESULT_DATA0 + 2] = { TGSI_SEMANTIC_COLOR, 2 },
212 [FRAG_RESULT_DATA0 + 3] = { TGSI_SEMANTIC_COLOR, 3 },
213 [FRAG_RESULT_DATA0 + 4] = { TGSI_SEMANTIC_COLOR, 4 },
214 [FRAG_RESULT_DATA0 + 5] = { TGSI_SEMANTIC_COLOR, 5 },
215 [FRAG_RESULT_DATA0 + 6] = { TGSI_SEMANTIC_COLOR, 6 },
216 [FRAG_RESULT_DATA0 + 7] = { TGSI_SEMANTIC_COLOR, 7 },
217 };
218
219 *semantic_name = map[slot][0];
220 *semantic_index = map[slot][1];
221 }
222
223 static nir_ssa_def *
224 ttn_src_for_dest(nir_builder *b, nir_alu_dest *dest)
225 {
226 nir_alu_src src;
227 memset(&src, 0, sizeof(src));
228
229 if (dest->dest.is_ssa)
230 src.src = nir_src_for_ssa(&dest->dest.ssa);
231 else {
232 assert(!dest->dest.reg.indirect);
233 src.src = nir_src_for_reg(dest->dest.reg.reg);
234 src.src.reg.base_offset = dest->dest.reg.base_offset;
235 }
236
237 for (int i = 0; i < 4; i++)
238 src.swizzle[i] = i;
239
240 return nir_fmov_alu(b, src, 4);
241 }
242
243 static void
244 ttn_emit_declaration(struct ttn_compile *c)
245 {
246 nir_builder *b = &c->build;
247 struct tgsi_full_declaration *decl = &c->token->FullDeclaration;
248 unsigned array_size = decl->Range.Last - decl->Range.First + 1;
249 unsigned file = decl->Declaration.File;
250 unsigned i;
251
252 if (file == TGSI_FILE_TEMPORARY) {
253 if (decl->Declaration.Array) {
254 /* for arrays, we create variables instead of registers: */
255 nir_variable *var = rzalloc(b->shader, nir_variable);
256
257 var->type = glsl_array_type(glsl_vec4_type(), array_size);
258 var->data.mode = nir_var_global;
259 var->name = ralloc_asprintf(var, "arr_%d", decl->Array.ArrayID);
260
261 exec_list_push_tail(&b->shader->globals, &var->node);
262
263 for (i = 0; i < array_size; i++) {
264 /* point all the matching slots to the same var,
265 * with appropriate offset set, mostly just so
266 * we know what to do when tgsi does a non-indirect
267 * access
268 */
269 c->temp_regs[decl->Range.First + i].reg = NULL;
270 c->temp_regs[decl->Range.First + i].var = var;
271 c->temp_regs[decl->Range.First + i].offset = i;
272 }
273 } else {
274 for (i = 0; i < array_size; i++) {
275 nir_register *reg = nir_local_reg_create(b->impl);
276 reg->num_components = 4;
277 c->temp_regs[decl->Range.First + i].reg = reg;
278 c->temp_regs[decl->Range.First + i].var = NULL;
279 c->temp_regs[decl->Range.First + i].offset = 0;
280 }
281 }
282 } else if (file == TGSI_FILE_ADDRESS) {
283 c->addr_reg = nir_local_reg_create(b->impl);
284 c->addr_reg->num_components = 4;
285 } else if (file == TGSI_FILE_SYSTEM_VALUE) {
286 /* Nothing to record for system values. */
287 } else if (file == TGSI_FILE_SAMPLER) {
288 /* Nothing to record for samplers. */
289 } else if (file == TGSI_FILE_SAMPLER_VIEW) {
290 struct tgsi_declaration_sampler_view *sview = &decl->SamplerView;
291 nir_alu_type type;
292
293 assert((sview->ReturnTypeX == sview->ReturnTypeY) &&
294 (sview->ReturnTypeX == sview->ReturnTypeZ) &&
295 (sview->ReturnTypeX == sview->ReturnTypeW));
296
297 switch (sview->ReturnTypeX) {
298 case TGSI_RETURN_TYPE_SINT:
299 type = nir_type_int;
300 break;
301 case TGSI_RETURN_TYPE_UINT:
302 type = nir_type_uint;
303 break;
304 case TGSI_RETURN_TYPE_FLOAT:
305 default:
306 type = nir_type_float;
307 break;
308 }
309
310 for (i = 0; i < array_size; i++) {
311 c->samp_types[decl->Range.First + i] = type;
312 }
313 } else {
314 bool is_array = (array_size > 1);
315
316 assert(file == TGSI_FILE_INPUT ||
317 file == TGSI_FILE_OUTPUT ||
318 file == TGSI_FILE_CONSTANT);
319
320 /* nothing to do for UBOs: */
321 if ((file == TGSI_FILE_CONSTANT) && decl->Declaration.Dimension)
322 return;
323
324 if ((file == TGSI_FILE_INPUT) || (file == TGSI_FILE_OUTPUT)) {
325 is_array = (is_array && decl->Declaration.Array &&
326 (decl->Array.ArrayID != 0));
327 }
328
329 for (i = 0; i < array_size; i++) {
330 unsigned idx = decl->Range.First + i;
331 nir_variable *var = rzalloc(b->shader, nir_variable);
332
333 var->data.driver_location = idx;
334
335 var->type = glsl_vec4_type();
336 if (is_array)
337 var->type = glsl_array_type(var->type, array_size);
338
339 switch (file) {
340 case TGSI_FILE_INPUT:
341 var->data.read_only = true;
342 var->data.mode = nir_var_shader_in;
343 var->name = ralloc_asprintf(var, "in_%d", idx);
344
345 if (c->scan->processor == TGSI_PROCESSOR_FRAGMENT) {
346 var->data.location =
347 tgsi_varying_semantic_to_slot(decl->Semantic.Name,
348 decl->Semantic.Index);
349 } else {
350 assert(!decl->Declaration.Semantic);
351 var->data.location = VERT_ATTRIB_GENERIC0 + idx;
352 }
353 var->data.index = 0;
354
355 /* We definitely need to translate the interpolation field, because
356 * nir_print will decode it.
357 */
358 switch (decl->Interp.Interpolate) {
359 case TGSI_INTERPOLATE_CONSTANT:
360 var->data.interpolation = INTERP_QUALIFIER_FLAT;
361 break;
362 case TGSI_INTERPOLATE_LINEAR:
363 var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
364 break;
365 case TGSI_INTERPOLATE_PERSPECTIVE:
366 var->data.interpolation = INTERP_QUALIFIER_SMOOTH;
367 break;
368 }
369
370 exec_list_push_tail(&b->shader->inputs, &var->node);
371 break;
372 case TGSI_FILE_OUTPUT: {
373 int semantic_name = decl->Semantic.Name;
374 int semantic_index = decl->Semantic.Index;
375 /* Since we can't load from outputs in the IR, we make temporaries
376 * for the outputs and emit stores to the real outputs at the end of
377 * the shader.
378 */
379 nir_register *reg = nir_local_reg_create(b->impl);
380 reg->num_components = 4;
381 if (is_array)
382 reg->num_array_elems = array_size;
383
384 var->data.mode = nir_var_shader_out;
385 var->name = ralloc_asprintf(var, "out_%d", idx);
386 var->data.index = 0;
387
388 if (c->scan->processor == TGSI_PROCESSOR_FRAGMENT) {
389 switch (semantic_name) {
390 case TGSI_SEMANTIC_COLOR: {
391 /* TODO tgsi loses some information, so we cannot
392 * actually differentiate here between DSB and MRT
393 * at this point. But so far no drivers using tgsi-
394 * to-nir support dual source blend:
395 */
396 bool dual_src_blend = false;
397 if (dual_src_blend && (semantic_index == 1)) {
398 var->data.location = FRAG_RESULT_DATA0;
399 var->data.index = 1;
400 } else {
401 if (c->scan->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
402 var->data.location = FRAG_RESULT_COLOR;
403 else
404 var->data.location = FRAG_RESULT_DATA0 + semantic_index;
405 }
406 break;
407 }
408 case TGSI_SEMANTIC_POSITION:
409 var->data.location = FRAG_RESULT_DEPTH;
410 break;
411 default:
412 fprintf(stderr, "Bad TGSI semantic: %d/%d\n",
413 decl->Semantic.Name, decl->Semantic.Index);
414 abort();
415 }
416 } else {
417 var->data.location =
418 tgsi_varying_semantic_to_slot(semantic_name, semantic_index);
419 }
420
421 if (is_array) {
422 unsigned j;
423 for (j = 0; j < array_size; j++) {
424 c->output_regs[idx + j].offset = i + j;
425 c->output_regs[idx + j].reg = reg;
426 }
427 } else {
428 c->output_regs[idx].offset = i;
429 c->output_regs[idx].reg = reg;
430 }
431
432 exec_list_push_tail(&b->shader->outputs, &var->node);
433 }
434 break;
435 case TGSI_FILE_CONSTANT:
436 var->data.mode = nir_var_uniform;
437 var->name = ralloc_asprintf(var, "uniform_%d", idx);
438
439 exec_list_push_tail(&b->shader->uniforms, &var->node);
440 break;
441 default:
442 unreachable("bad declaration file");
443 return;
444 }
445
446 if (is_array)
447 break;
448 }
449
450 }
451 }
452
453 static void
454 ttn_emit_immediate(struct ttn_compile *c)
455 {
456 nir_builder *b = &c->build;
457 struct tgsi_full_immediate *tgsi_imm = &c->token->FullImmediate;
458 nir_load_const_instr *load_const;
459 int i;
460
461 load_const = nir_load_const_instr_create(b->shader, 4);
462 c->imm_defs[c->next_imm] = &load_const->def;
463 c->next_imm++;
464
465 for (i = 0; i < 4; i++)
466 load_const->value.u[i] = tgsi_imm->u[i].Uint;
467
468 nir_builder_instr_insert(b, &load_const->instr);
469 }
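/* Each TGSI immediate token becomes one vec4 load_const, recorded in parse
 * order via c->next_imm, so a later source operand such as IMM[2].yyyy just
 * picks up c->imm_defs[2] and applies its swizzle in ttn_get_src().
 */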
470
471 static nir_ssa_def *
472 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect);
473
474 /* generate either a constant or indirect deref chain for accessing an
475 * array variable.
476 */
477 static nir_deref_var *
478 ttn_array_deref(struct ttn_compile *c, nir_intrinsic_instr *instr,
479 nir_variable *var, unsigned offset,
480 struct tgsi_ind_register *indirect)
481 {
482 nir_deref_var *deref = nir_deref_var_create(instr, var);
483 nir_deref_array *arr = nir_deref_array_create(deref);
484
485 arr->base_offset = offset;
486 arr->deref.type = glsl_get_array_element(var->type);
487
488 if (indirect) {
489 arr->deref_array_type = nir_deref_array_type_indirect;
490 arr->indirect = nir_src_for_ssa(ttn_src_for_indirect(c, indirect));
491 } else {
492 arr->deref_array_type = nir_deref_array_type_direct;
493 }
494
495 deref->deref.child = &arr->deref;
496
497 return deref;
498 }
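/* The resulting chain is a variable deref followed by an array deref, e.g.
 * roughly arr_1[4] for a direct access at offset 4, or arr_1[4 + ADDR.x]
 * when an indirect register is present (the indirect value comes from
 * ttn_src_for_indirect()).
 */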
499
500 static nir_src
501 ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
502 struct tgsi_ind_register *indirect,
503 struct tgsi_dimension *dim,
504 struct tgsi_ind_register *dimind)
505 {
506 nir_builder *b = &c->build;
507 nir_src src;
508
509 memset(&src, 0, sizeof(src));
510
511 switch (file) {
512 case TGSI_FILE_TEMPORARY:
513 if (c->temp_regs[index].var) {
514 unsigned offset = c->temp_regs[index].offset;
515 nir_variable *var = c->temp_regs[index].var;
516 nir_intrinsic_instr *load;
517
518 load = nir_intrinsic_instr_create(b->shader,
519 nir_intrinsic_load_var);
520 load->num_components = 4;
521 load->variables[0] = ttn_array_deref(c, load, var, offset, indirect);
522
523 nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
524 nir_builder_instr_insert(b, &load->instr);
525
526 src = nir_src_for_ssa(&load->dest.ssa);
527
528 } else {
529 assert(!indirect);
530 src.reg.reg = c->temp_regs[index].reg;
531 }
532 assert(!dim);
533 break;
534
535 case TGSI_FILE_ADDRESS:
536 src.reg.reg = c->addr_reg;
537 assert(!dim);
538 break;
539
540 case TGSI_FILE_IMMEDIATE:
541 src = nir_src_for_ssa(c->imm_defs[index]);
542 assert(!indirect);
543 assert(!dim);
544 break;
545
546 case TGSI_FILE_SYSTEM_VALUE: {
547 nir_intrinsic_instr *load;
548 nir_intrinsic_op op;
549 unsigned ncomp = 1;
550
551 assert(!indirect);
552 assert(!dim);
553
554 switch (c->scan->system_value_semantic_name[index]) {
555 case TGSI_SEMANTIC_VERTEXID_NOBASE:
556 op = nir_intrinsic_load_vertex_id_zero_base;
557 break;
558 case TGSI_SEMANTIC_VERTEXID:
559 op = nir_intrinsic_load_vertex_id;
560 break;
561 case TGSI_SEMANTIC_BASEVERTEX:
562 op = nir_intrinsic_load_base_vertex;
563 break;
564 case TGSI_SEMANTIC_INSTANCEID:
565 op = nir_intrinsic_load_instance_id;
566 break;
567 default:
568 unreachable("bad system value");
569 }
570
571 load = nir_intrinsic_instr_create(b->shader, op);
572 load->num_components = ncomp;
573
574 nir_ssa_dest_init(&load->instr, &load->dest, ncomp, NULL);
575 nir_builder_instr_insert(b, &load->instr);
576
577 src = nir_src_for_ssa(&load->dest.ssa);
578 break;
579 }
580
581 case TGSI_FILE_INPUT:
582 case TGSI_FILE_CONSTANT: {
583 nir_intrinsic_instr *load;
584 nir_intrinsic_op op;
585 unsigned srcn = 0;
586
587 switch (file) {
588 case TGSI_FILE_INPUT:
589 op = nir_intrinsic_load_input;
590 assert(!dim);
591 break;
592 case TGSI_FILE_CONSTANT:
593 if (dim) {
594 op = nir_intrinsic_load_ubo;
595 } else {
596 op = nir_intrinsic_load_uniform;
597 }
598 break;
599 default:
600 unreachable("No other load files supported");
601 break;
602 }
603
604 load = nir_intrinsic_instr_create(b->shader, op);
605
606 load->num_components = 4;
607 if (dim) {
608 if (dimind) {
609 load->src[srcn] =
610 ttn_src_for_file_and_index(c, dimind->File, dimind->Index,
611 NULL, NULL, NULL);
612 } else {
613 /* UBOs start at index 1 in TGSI: */
614 load->src[srcn] =
615 nir_src_for_ssa(nir_imm_int(b, dim->Index - 1));
616 }
617 srcn++;
618 }
619
620 nir_ssa_def *offset;
621 if (dim) {
622 /* UBO loads don't have a const_index[0] base offset. */
623 offset = nir_imm_int(b, index);
624 if (indirect) {
625 offset = nir_iadd(b, offset, ttn_src_for_indirect(c, indirect));
626 }
627 /* UBO offsets are in bytes, but TGSI gives them to us in vec4's */
628 offset = nir_ishl(b, offset, nir_imm_int(b, 4));
629 } else {
630 load->const_index[0] = index;
631 if (indirect) {
632 offset = ttn_src_for_indirect(c, indirect);
633 } else {
634 offset = nir_imm_int(b, 0);
635 }
636 }
637 load->src[srcn++] = nir_src_for_ssa(offset);
638
639 nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
640 nir_builder_instr_insert(b, &load->instr);
641
642 src = nir_src_for_ssa(&load->dest.ssa);
643 break;
644 }
645
646 default:
647 unreachable("bad src file");
648 }
649
650
651 return src;
652 }
653
654 static nir_ssa_def *
655 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect)
656 {
657 nir_builder *b = &c->build;
658 nir_alu_src src;
659 memset(&src, 0, sizeof(src));
660 for (int i = 0; i < 4; i++)
661 src.swizzle[i] = indirect->Swizzle;
662 src.src = ttn_src_for_file_and_index(c,
663 indirect->File,
664 indirect->Index,
665 NULL, NULL, NULL);
666 return nir_imov_alu(b, src, 1);
667 }
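/* So a TGSI operand like CONST[ADDR[0].x + 5] becomes a base index of 5 plus
 * this single-component value read from the address (or temporary) register,
 * using the indirect operand's own swizzle component.
 */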
668
669 static nir_alu_dest
670 ttn_get_dest(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
671 {
672 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
673 nir_alu_dest dest;
674 unsigned index = tgsi_dst->Index;
675
676 memset(&dest, 0, sizeof(dest));
677
678 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
679 if (c->temp_regs[index].var) {
680 nir_builder *b = &c->build;
681 nir_intrinsic_instr *load;
682 struct tgsi_ind_register *indirect =
683 tgsi_dst->Indirect ? &tgsi_fdst->Indirect : NULL;
684 nir_register *reg;
685
686 /* this works, because TGSI will give us a base offset
687 * (in case of indirect index) that points back into
688 * the array. Access can be direct or indirect, we
689 * don't really care. Just create a one-shot dst reg
690 * that will get store_var'd back into the array var
691 * at the end of ttn_emit_instruction()
692 */
693 reg = nir_local_reg_create(c->build.impl);
694 reg->num_components = 4;
695 dest.dest.reg.reg = reg;
696 dest.dest.reg.base_offset = 0;
697
698 /* since the alu op might not write to all components
699 * of the temporary, we must first do a load_var to
700 * get the previous array elements into the register.
701 * This is one area that NIR could use a bit of
702 * improvement (or opt pass to clean up the mess
703 * once things are scalarized)
704 */
705
706 load = nir_intrinsic_instr_create(c->build.shader,
707 nir_intrinsic_load_var);
708 load->num_components = 4;
709 load->variables[0] =
710 ttn_array_deref(c, load, c->temp_regs[index].var,
711 c->temp_regs[index].offset,
712 indirect);
713
714 load->dest = nir_dest_for_reg(reg);
715
716 nir_builder_instr_insert(b, &load->instr);
717 } else {
718 assert(!tgsi_dst->Indirect);
719 dest.dest.reg.reg = c->temp_regs[index].reg;
720 dest.dest.reg.base_offset = c->temp_regs[index].offset;
721 }
722 } else if (tgsi_dst->File == TGSI_FILE_OUTPUT) {
723 dest.dest.reg.reg = c->output_regs[index].reg;
724 dest.dest.reg.base_offset = c->output_regs[index].offset;
725 } else if (tgsi_dst->File == TGSI_FILE_ADDRESS) {
726 assert(index == 0);
727 dest.dest.reg.reg = c->addr_reg;
728 }
729
730 dest.write_mask = tgsi_dst->WriteMask;
731 dest.saturate = false;
732
733 if (tgsi_dst->Indirect && (tgsi_dst->File != TGSI_FILE_TEMPORARY)) {
734 nir_src *indirect = ralloc(c->build.shader, nir_src);
735 *indirect = nir_src_for_ssa(ttn_src_for_indirect(c, &tgsi_fdst->Indirect));
736 dest.dest.reg.indirect = indirect;
737 }
738
739 return dest;
740 }
741
742 static nir_variable *
743 ttn_get_var(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
744 {
745 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
746 unsigned index = tgsi_dst->Index;
747
748 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
749 /* we should not have an indirect when there is no var! */
750 if (!c->temp_regs[index].var)
751 assert(!tgsi_dst->Indirect);
752 return c->temp_regs[index].var;
753 }
754
755 return NULL;
756 }
757
758 static nir_ssa_def *
759 ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc)
760 {
761 nir_builder *b = &c->build;
762 struct tgsi_src_register *tgsi_src = &tgsi_fsrc->Register;
763 unsigned tgsi_opcode = c->token->FullInstruction.Instruction.Opcode;
764 unsigned tgsi_src_type = tgsi_opcode_infer_src_type(tgsi_opcode);
765 bool src_is_float = !(tgsi_src_type == TGSI_TYPE_SIGNED ||
766 tgsi_src_type == TGSI_TYPE_UNSIGNED);
767 nir_alu_src src;
768
769 memset(&src, 0, sizeof(src));
770
771 if (tgsi_src->File == TGSI_FILE_NULL) {
772 return nir_imm_float(b, 0.0);
773 } else if (tgsi_src->File == TGSI_FILE_SAMPLER) {
774 /* Only the index of the sampler gets used in texturing, and it will
775 * handle looking that up on its own instead of using the nir_alu_src.
776 */
777 assert(!tgsi_src->Indirect);
778 return NULL;
779 } else {
780 struct tgsi_ind_register *ind = NULL;
781 struct tgsi_dimension *dim = NULL;
782 struct tgsi_ind_register *dimind = NULL;
783 if (tgsi_src->Indirect)
784 ind = &tgsi_fsrc->Indirect;
785 if (tgsi_src->Dimension) {
786 dim = &tgsi_fsrc->Dimension;
787 if (dim->Indirect)
788 dimind = &tgsi_fsrc->DimIndirect;
789 }
790 src.src = ttn_src_for_file_and_index(c,
791 tgsi_src->File,
792 tgsi_src->Index,
793 ind, dim, dimind);
794 }
795
796 src.swizzle[0] = tgsi_src->SwizzleX;
797 src.swizzle[1] = tgsi_src->SwizzleY;
798 src.swizzle[2] = tgsi_src->SwizzleZ;
799 src.swizzle[3] = tgsi_src->SwizzleW;
800
801 nir_ssa_def *def = nir_fmov_alu(b, src, 4);
802
803 if (tgsi_src->Absolute) {
804 if (src_is_float)
805 def = nir_fabs(b, def);
806 else
807 def = nir_iabs(b, def);
808 }
809
810 if (tgsi_src->Negate) {
811 if (src_is_float)
812 def = nir_fneg(b, def);
813 else
814 def = nir_ineg(b, def);
815 }
816
817 return def;
818 }
819
820 static void
821 ttn_alu(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
822 {
823 unsigned num_srcs = nir_op_infos[op].num_inputs;
824 nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
825 unsigned i;
826
827 for (i = 0; i < num_srcs; i++)
828 instr->src[i].src = nir_src_for_ssa(src[i]);
829
830 instr->dest = dest;
831 nir_builder_instr_insert(b, &instr->instr);
832 }
833
834 static void
835 ttn_move_dest_masked(nir_builder *b, nir_alu_dest dest,
836 nir_ssa_def *def, unsigned write_mask)
837 {
838 if (!(dest.write_mask & write_mask))
839 return;
840
841 nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_imov);
842 mov->dest = dest;
843 mov->dest.write_mask &= write_mask;
844 mov->src[0].src = nir_src_for_ssa(def);
845 for (unsigned i = def->num_components; i < 4; i++)
846 mov->src[0].swizzle[i] = def->num_components - 1;
847 nir_builder_instr_insert(b, &mov->instr);
848 }
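/* Note the swizzle fix-up above: when def has fewer than four components
 * (e.g. a scalar from ttn_channel()), its last component is replicated into
 * the remaining swizzle slots so every enabled write-mask channel reads a
 * valid component.
 */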
849
850 static void
851 ttn_move_dest(nir_builder *b, nir_alu_dest dest, nir_ssa_def *def)
852 {
853 ttn_move_dest_masked(b, dest, def, TGSI_WRITEMASK_XYZW);
854 }
855
856 static void
857 ttn_arl(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
858 {
859 ttn_move_dest(b, dest, nir_f2i(b, nir_ffloor(b, src[0])));
860 }
861
862 /* EXP - Approximate Exponential Base 2
863 * dst.x = 2^{\lfloor src.x\rfloor}
864 * dst.y = src.x - \lfloor src.x\rfloor
865 * dst.z = 2^{src.x}
866 * dst.w = 1.0
867 */
868 static void
869 ttn_exp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
870 {
871 nir_ssa_def *srcx = ttn_channel(b, src[0], X);
872
873 ttn_move_dest_masked(b, dest, nir_fexp2(b, nir_ffloor(b, srcx)),
874 TGSI_WRITEMASK_X);
875 ttn_move_dest_masked(b, dest, nir_fsub(b, srcx, nir_ffloor(b, srcx)),
876 TGSI_WRITEMASK_Y);
877 ttn_move_dest_masked(b, dest, nir_fexp2(b, srcx), TGSI_WRITEMASK_Z);
878 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
879 }
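/* Worked example: for src.x = 2.5 the masked moves above yield
 * dst = (2^2, 2.5 - 2.0, 2^2.5, 1.0) ~= (4.0, 0.5, 5.657, 1.0).
 */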
880
881 /* LOG - Approximate Logarithm Base 2
882 * dst.x = \lfloor\log_2{|src.x|}\rfloor
883 * dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
884 * dst.z = \log_2{|src.x|}
885 * dst.w = 1.0
886 */
887 static void
888 ttn_log(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
889 {
890 nir_ssa_def *abs_srcx = nir_fabs(b, ttn_channel(b, src[0], X));
891 nir_ssa_def *log2 = nir_flog2(b, abs_srcx);
892
893 ttn_move_dest_masked(b, dest, nir_ffloor(b, log2), TGSI_WRITEMASK_X);
894 ttn_move_dest_masked(b, dest,
895 nir_fdiv(b, abs_srcx, nir_fexp2(b, nir_ffloor(b, log2))),
896 TGSI_WRITEMASK_Y);
897 ttn_move_dest_masked(b, dest, nir_flog2(b, abs_srcx), TGSI_WRITEMASK_Z);
898 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
899 }
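/* Worked example: for src.x = -10.0, |src.x| = 10.0 and log2(10) ~= 3.3219,
 * so dst ~= (3.0, 10.0 / 8.0 = 1.25, 3.3219, 1.0).
 */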
900
901 /* DST - Distance Vector
902 * dst.x = 1.0
903 * dst.y = src0.y \times src1.y
904 * dst.z = src0.z
905 * dst.w = src1.w
906 */
907 static void
908 ttn_dst(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
909 {
910 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_X);
911 ttn_move_dest_masked(b, dest, nir_fmul(b, src[0], src[1]), TGSI_WRITEMASK_Y);
912 ttn_move_dest_masked(b, dest, nir_fmov(b, src[0]), TGSI_WRITEMASK_Z);
913 ttn_move_dest_masked(b, dest, nir_fmov(b, src[1]), TGSI_WRITEMASK_W);
914 }
915
916 /* LIT - Light Coefficients
917 * dst.x = 1.0
918 * dst.y = max(src.x, 0.0)
919  *  dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0)} : 0
920 * dst.w = 1.0
921 */
922 static void
923 ttn_lit(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
924 {
925 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_XW);
926
927 ttn_move_dest_masked(b, dest, nir_fmax(b, ttn_channel(b, src[0], X),
928 nir_imm_float(b, 0.0)), TGSI_WRITEMASK_Y);
929
930 if (dest.write_mask & TGSI_WRITEMASK_Z) {
931 nir_ssa_def *src0_y = ttn_channel(b, src[0], Y);
932 nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, ttn_channel(b, src[0], W),
933 nir_imm_float(b, 128.0)),
934 nir_imm_float(b, -128.0));
935 nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
936 wclamp);
937
938 ttn_move_dest_masked(b, dest,
939 nir_bcsel(b,
940 nir_fge(b,
941 nir_imm_float(b, 0.0),
942 ttn_channel(b, src[0], X)),
943 nir_imm_float(b, 0.0),
944 pow),
945 TGSI_WRITEMASK_Z);
946 }
947 }
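/* Worked example: for src.x = 0.5, src.y = 0.25, src.w = 2.0 this yields
 * dst = (1.0, 0.5, 0.25^2 = 0.0625, 1.0); when src.x <= 0.0 the bcsel above
 * forces dst.z to 0.0 regardless of src.y and src.w.
 */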
948
949 /* SCS - Sine Cosine
950 * dst.x = \cos{src.x}
951 * dst.y = \sin{src.x}
952 * dst.z = 0.0
953 * dst.w = 1.0
954 */
955 static void
956 ttn_scs(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
957 {
958 ttn_move_dest_masked(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)),
959 TGSI_WRITEMASK_X);
960 ttn_move_dest_masked(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)),
961 TGSI_WRITEMASK_Y);
962 ttn_move_dest_masked(b, dest, nir_imm_float(b, 0.0), TGSI_WRITEMASK_Z);
963 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
964 }
965
966 static void
967 ttn_sle(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
968 {
969 ttn_move_dest(b, dest, nir_sge(b, src[1], src[0]));
970 }
971
972 static void
973 ttn_sgt(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
974 {
975 ttn_move_dest(b, dest, nir_slt(b, src[1], src[0]));
976 }
977
978 static void
979 ttn_clamp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
980 {
981 ttn_move_dest(b, dest, nir_fmin(b, nir_fmax(b, src[0], src[1]), src[2]));
982 }
983
984 static void
985 ttn_xpd(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
986 {
987 ttn_move_dest_masked(b, dest,
988 nir_fsub(b,
989 nir_fmul(b,
990 ttn_swizzle(b, src[0], Y, Z, X, X),
991 ttn_swizzle(b, src[1], Z, X, Y, X)),
992 nir_fmul(b,
993 ttn_swizzle(b, src[1], Y, Z, X, X),
994 ttn_swizzle(b, src[0], Z, X, Y, X))),
995 TGSI_WRITEMASK_XYZ);
996 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
997 }
998
999 static void
1000 ttn_dp2a(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1001 {
1002 ttn_move_dest(b, dest,
1003 ttn_channel(b, nir_fadd(b, nir_fdot2(b, src[0], src[1]),
1004 src[2]),
1005 X));
1006 }
1007
1008 static void
1009 ttn_dp2(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1010 {
1011 ttn_move_dest(b, dest, nir_fdot2(b, src[0], src[1]));
1012 }
1013
1014 static void
1015 ttn_dp3(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1016 {
1017 ttn_move_dest(b, dest, nir_fdot3(b, src[0], src[1]));
1018 }
1019
1020 static void
1021 ttn_dp4(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1022 {
1023 ttn_move_dest(b, dest, nir_fdot4(b, src[0], src[1]));
1024 }
1025
1026 static void
1027 ttn_dph(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1028 {
1029 ttn_move_dest(b, dest, nir_fadd(b, nir_fdot3(b, src[0], src[1]),
1030 ttn_channel(b, src[1], W)));
1031 }
1032
1033 static void
1034 ttn_umad(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1035 {
1036 ttn_move_dest(b, dest, nir_iadd(b, nir_imul(b, src[0], src[1]), src[2]));
1037 }
1038
1039 static void
1040 ttn_arr(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1041 {
1042 ttn_move_dest(b, dest, nir_ffloor(b, nir_fadd(b, src[0], nir_imm_float(b, 0.5))));
1043 }
1044
1045 static void
1046 ttn_cmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1047 {
1048 ttn_move_dest(b, dest, nir_bcsel(b,
1049 nir_flt(b, src[0], nir_imm_float(b, 0.0)),
1050 src[1], src[2]));
1051 }
1052
1053 static void
1054 ttn_ucmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1055 {
1056 ttn_move_dest(b, dest, nir_bcsel(b,
1057 nir_ine(b, src[0], nir_imm_int(b, 0)),
1058 src[1], src[2]));
1059 }
1060
1061 static void
1062 ttn_kill(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1063 {
1064 nir_intrinsic_instr *discard =
1065 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
1066 nir_builder_instr_insert(b, &discard->instr);
1067 }
1068
1069 static void
1070 ttn_kill_if(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1071 {
1072 nir_ssa_def *cmp = nir_bany4(b, nir_flt(b, src[0], nir_imm_float(b, 0.0)));
1073 nir_intrinsic_instr *discard =
1074 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard_if);
1075 discard->src[0] = nir_src_for_ssa(cmp);
1076 nir_builder_instr_insert(b, &discard->instr);
1077 }
1078
1079 static void
1080 ttn_if(struct ttn_compile *c, nir_ssa_def *src, bool is_uint)
1081 {
1082 nir_builder *b = &c->build;
1083
1084 src = ttn_channel(b, src, X);
1085
1086 nir_if *if_stmt = nir_if_create(b->shader);
1087 if (is_uint) {
1088 if_stmt->condition = nir_src_for_ssa(nir_ine(b, src, nir_imm_int(b, 0)));
1089 } else {
1090 if_stmt->condition = nir_src_for_ssa(nir_fne(b, src, nir_imm_int(b, 0)));
1091 }
1092 nir_builder_cf_insert(b, &if_stmt->cf_node);
1093
1094 c->if_stack[c->if_stack_pos] = nir_after_cf_node(&if_stmt->cf_node);
1095 c->if_stack_pos++;
1096
1097 b->cursor = nir_after_cf_list(&if_stmt->then_list);
1098
1099 c->if_stack[c->if_stack_pos] = nir_after_cf_list(&if_stmt->else_list);
1100 c->if_stack_pos++;
1101 }
1102
1103 static void
1104 ttn_else(struct ttn_compile *c)
1105 {
1106 nir_builder *b = &c->build;
1107
1108 b->cursor = c->if_stack[c->if_stack_pos - 1];
1109 }
1110
1111 static void
1112 ttn_endif(struct ttn_compile *c)
1113 {
1114 nir_builder *b = &c->build;
1115
1116 c->if_stack_pos -= 2;
1117 b->cursor = c->if_stack[c->if_stack_pos];
1118 }
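/* Cursor bookkeeping, by example: for TGSI "IF ... ELSE ... ENDIF", ttn_if()
 * pushes two cursors (the post-ENDIF point at if_stack_pos - 2 and the else
 * list at if_stack_pos - 1), ttn_else() moves the builder to the else list,
 * and ttn_endif() pops both and resumes emission after the nir_if node.
 */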
1119
1120 static void
1121 ttn_bgnloop(struct ttn_compile *c)
1122 {
1123 nir_builder *b = &c->build;
1124
1125 nir_loop *loop = nir_loop_create(b->shader);
1126 nir_builder_cf_insert(b, &loop->cf_node);
1127
1128 c->loop_stack[c->loop_stack_pos] = nir_after_cf_node(&loop->cf_node);
1129 c->loop_stack_pos++;
1130
1131 b->cursor = nir_after_cf_list(&loop->body);
1132 }
1133
1134 static void
1135 ttn_cont(nir_builder *b)
1136 {
1137 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_continue);
1138 nir_builder_instr_insert(b, &instr->instr);
1139 }
1140
1141 static void
1142 ttn_brk(nir_builder *b)
1143 {
1144 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
1145 nir_builder_instr_insert(b, &instr->instr);
1146 }
1147
1148 static void
1149 ttn_endloop(struct ttn_compile *c)
1150 {
1151 nir_builder *b = &c->build;
1152
1153 c->loop_stack_pos--;
1154 b->cursor = c->loop_stack[c->loop_stack_pos];
1155 }
1156
1157 static void
1158 setup_texture_info(nir_tex_instr *instr, unsigned texture)
1159 {
1160 switch (texture) {
1161 case TGSI_TEXTURE_BUFFER:
1162 instr->sampler_dim = GLSL_SAMPLER_DIM_BUF;
1163 break;
1164 case TGSI_TEXTURE_1D:
1165 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1166 break;
1167 case TGSI_TEXTURE_1D_ARRAY:
1168 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1169 instr->is_array = true;
1170 break;
1171 case TGSI_TEXTURE_SHADOW1D:
1172 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1173 instr->is_shadow = true;
1174 break;
1175 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1176 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1177 instr->is_shadow = true;
1178 instr->is_array = true;
1179 break;
1180 case TGSI_TEXTURE_2D:
1181 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1182 break;
1183 case TGSI_TEXTURE_2D_ARRAY:
1184 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1185 instr->is_array = true;
1186 break;
1187 case TGSI_TEXTURE_2D_MSAA:
1188 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1189 break;
1190 case TGSI_TEXTURE_2D_ARRAY_MSAA:
1191 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1192 instr->is_array = true;
1193 break;
1194 case TGSI_TEXTURE_SHADOW2D:
1195 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1196 instr->is_shadow = true;
1197 break;
1198 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1199 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1200 instr->is_shadow = true;
1201 instr->is_array = true;
1202 break;
1203 case TGSI_TEXTURE_3D:
1204 instr->sampler_dim = GLSL_SAMPLER_DIM_3D;
1205 break;
1206 case TGSI_TEXTURE_CUBE:
1207 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1208 break;
1209 case TGSI_TEXTURE_CUBE_ARRAY:
1210 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1211 instr->is_array = true;
1212 break;
1213 case TGSI_TEXTURE_SHADOWCUBE:
1214 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1215 instr->is_shadow = true;
1216 break;
1217 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
1218 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1219 instr->is_shadow = true;
1220 instr->is_array = true;
1221 break;
1222 case TGSI_TEXTURE_RECT:
1223 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1224 break;
1225 case TGSI_TEXTURE_SHADOWRECT:
1226 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1227 instr->is_shadow = true;
1228 break;
1229 default:
1230 fprintf(stderr, "Unknown TGSI texture target %d\n", texture);
1231 abort();
1232 }
1233 }
1234
1235 static void
1236 ttn_tex(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1237 {
1238 nir_builder *b = &c->build;
1239 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1240 nir_tex_instr *instr;
1241 nir_texop op;
1242 unsigned num_srcs, samp = 1, sview, i;
1243
1244 switch (tgsi_inst->Instruction.Opcode) {
1245 case TGSI_OPCODE_TEX:
1246 op = nir_texop_tex;
1247 num_srcs = 1;
1248 break;
1249 case TGSI_OPCODE_TEX2:
1250 op = nir_texop_tex;
1251 num_srcs = 1;
1252 samp = 2;
1253 break;
1254 case TGSI_OPCODE_TXP:
1255 op = nir_texop_tex;
1256 num_srcs = 2;
1257 break;
1258 case TGSI_OPCODE_TXB:
1259 op = nir_texop_txb;
1260 num_srcs = 2;
1261 break;
1262 case TGSI_OPCODE_TXB2:
1263 op = nir_texop_txb;
1264 num_srcs = 2;
1265 samp = 2;
1266 break;
1267 case TGSI_OPCODE_TXL:
1268 op = nir_texop_txl;
1269 num_srcs = 2;
1270 break;
1271 case TGSI_OPCODE_TXL2:
1272 op = nir_texop_txl;
1273 num_srcs = 2;
1274 samp = 2;
1275 break;
1276 case TGSI_OPCODE_TXF:
1277 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
1278 tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA) {
1279 op = nir_texop_txf_ms;
1280 } else {
1281 op = nir_texop_txf;
1282 }
1283 num_srcs = 2;
1284 break;
1285 case TGSI_OPCODE_TXD:
1286 op = nir_texop_txd;
1287 num_srcs = 3;
1288 samp = 3;
1289 break;
1290 case TGSI_OPCODE_LODQ:
1291 op = nir_texop_lod;
1292 num_srcs = 1;
1293 break;
1294
1295 default:
1296 fprintf(stderr, "unknown TGSI tex op %d\n", tgsi_inst->Instruction.Opcode);
1297 abort();
1298 }
1299
1300 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
1301 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
1302 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
1303 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
1304 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
1305 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
1306 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
1307 num_srcs++;
1308 }
1309
1310 num_srcs += tgsi_inst->Texture.NumOffsets;
1311
1312 instr = nir_tex_instr_create(b->shader, num_srcs);
1313 instr->op = op;
1314
1315 setup_texture_info(instr, tgsi_inst->Texture.Texture);
1316
1317 switch (instr->sampler_dim) {
1318 case GLSL_SAMPLER_DIM_1D:
1319 case GLSL_SAMPLER_DIM_BUF:
1320 instr->coord_components = 1;
1321 break;
1322 case GLSL_SAMPLER_DIM_2D:
1323 case GLSL_SAMPLER_DIM_RECT:
1324 case GLSL_SAMPLER_DIM_EXTERNAL:
1325 case GLSL_SAMPLER_DIM_MS:
1326 instr->coord_components = 2;
1327 break;
1328 case GLSL_SAMPLER_DIM_3D:
1329 case GLSL_SAMPLER_DIM_CUBE:
1330 instr->coord_components = 3;
1331 break;
1332 }
1333
1334 if (instr->is_array)
1335 instr->coord_components++;
1336
1337 assert(tgsi_inst->Src[samp].Register.File == TGSI_FILE_SAMPLER);
1338 instr->sampler_index = tgsi_inst->Src[samp].Register.Index;
1339
1340 /* TODO if we supported any opc's which take an explicit SVIEW
1341 * src, we would use that here instead. But for the "legacy"
1342 * texture opc's the SVIEW index is same as SAMP index:
1343 */
1344 sview = instr->sampler_index;
1345
1346 if (op == nir_texop_lod) {
1347 instr->dest_type = nir_type_float;
1348 } else if (sview < c->num_samp_types) {
1349 instr->dest_type = c->samp_types[sview];
1350 } else {
1351 instr->dest_type = nir_type_float;
1352 }
1353
1354 unsigned src_number = 0;
1355
1356 instr->src[src_number].src =
1357 nir_src_for_ssa(nir_swizzle(b, src[0], SWIZ(X, Y, Z, W),
1358 instr->coord_components, false));
1359 instr->src[src_number].src_type = nir_tex_src_coord;
1360 src_number++;
1361
1362 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1363 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1364 instr->src[src_number].src_type = nir_tex_src_projector;
1365 src_number++;
1366 }
1367
1368 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB) {
1369 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1370 instr->src[src_number].src_type = nir_tex_src_bias;
1371 src_number++;
1372 }
1373
1374 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
1375 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1376 instr->src[src_number].src_type = nir_tex_src_bias;
1377 src_number++;
1378 }
1379
1380 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
1381 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1382 instr->src[src_number].src_type = nir_tex_src_lod;
1383 src_number++;
1384 }
1385
1386 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
1387 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1388 instr->src[src_number].src_type = nir_tex_src_lod;
1389 src_number++;
1390 }
1391
1392 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
1393 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1394 if (op == nir_texop_txf_ms)
1395 instr->src[src_number].src_type = nir_tex_src_ms_index;
1396 else
1397 instr->src[src_number].src_type = nir_tex_src_lod;
1398 src_number++;
1399 }
1400
1401 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
1402 instr->src[src_number].src =
1403 nir_src_for_ssa(nir_swizzle(b, src[1], SWIZ(X, Y, Z, W),
1404 instr->coord_components, false));
1405 instr->src[src_number].src_type = nir_tex_src_ddx;
1406 src_number++;
1407 instr->src[src_number].src =
1408 nir_src_for_ssa(nir_swizzle(b, src[2], SWIZ(X, Y, Z, W),
1409 instr->coord_components, false));
1410 instr->src[src_number].src_type = nir_tex_src_ddy;
1411 src_number++;
1412 }
1413
1414 if (instr->is_shadow) {
1415 if (instr->coord_components == 4)
1416 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1417 else if (instr->coord_components == 3)
1418 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1419 else
1420 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], Z));
1421
1422 instr->src[src_number].src_type = nir_tex_src_comparitor;
1423 src_number++;
1424 }
1425
1426 for (i = 0; i < tgsi_inst->Texture.NumOffsets; i++) {
1427 struct tgsi_texture_offset *tex_offset = &tgsi_inst->TexOffsets[i];
1428       /* since TexOffset isn't using tgsi_full_src_register we get to
1429 * do some extra gymnastics:
1430 */
1431 nir_alu_src src;
1432
1433 memset(&src, 0, sizeof(src));
1434
1435 src.src = ttn_src_for_file_and_index(c,
1436 tex_offset->File,
1437 tex_offset->Index,
1438 NULL, NULL, NULL);
1439
1440 src.swizzle[0] = tex_offset->SwizzleX;
1441 src.swizzle[1] = tex_offset->SwizzleY;
1442 src.swizzle[2] = tex_offset->SwizzleZ;
1443 src.swizzle[3] = TGSI_SWIZZLE_W;
1444
1445 instr->src[src_number].src_type = nir_tex_src_offset;
1446 instr->src[src_number].src = nir_src_for_ssa(
1447 nir_fmov_alu(b, src, nir_tex_instr_src_size(instr, src_number)));
1448 src_number++;
1449 }
1450
1451 assert(src_number == num_srcs);
1452
1453 nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
1454 nir_builder_instr_insert(b, &instr->instr);
1455
1456 /* Resolve the writemask on the texture op. */
1457 ttn_move_dest(b, dest, &instr->dest.ssa);
1458 }
1459
1460 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1461 *
1462 * dst.x = texture\_width(unit, lod)
1463 * dst.y = texture\_height(unit, lod)
1464 * dst.z = texture\_depth(unit, lod)
1465 * dst.w = texture\_levels(unit)
1466 *
1467 * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
1468 */
1469 static void
1470 ttn_txq(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1471 {
1472 nir_builder *b = &c->build;
1473 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1474 nir_tex_instr *txs, *qlv;
1475
1476 txs = nir_tex_instr_create(b->shader, 1);
1477 txs->op = nir_texop_txs;
1478 setup_texture_info(txs, tgsi_inst->Texture.Texture);
1479
1480 qlv = nir_tex_instr_create(b->shader, 0);
1481 qlv->op = nir_texop_query_levels;
1482 setup_texture_info(qlv, tgsi_inst->Texture.Texture);
1483
1484 assert(tgsi_inst->Src[1].Register.File == TGSI_FILE_SAMPLER);
1485 txs->sampler_index = tgsi_inst->Src[1].Register.Index;
1486 qlv->sampler_index = tgsi_inst->Src[1].Register.Index;
1487
1488 /* only single src, the lod: */
1489 txs->src[0].src = nir_src_for_ssa(ttn_channel(b, src[0], X));
1490 txs->src[0].src_type = nir_tex_src_lod;
1491
1492 nir_ssa_dest_init(&txs->instr, &txs->dest, 3, NULL);
1493 nir_builder_instr_insert(b, &txs->instr);
1494
1495 nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, NULL);
1496 nir_builder_instr_insert(b, &qlv->instr);
1497
1498 ttn_move_dest_masked(b, dest, &txs->dest.ssa, TGSI_WRITEMASK_XYZ);
1499 ttn_move_dest_masked(b, dest, &qlv->dest.ssa, TGSI_WRITEMASK_W);
1500 }
1501
1502 static const nir_op op_trans[TGSI_OPCODE_LAST] = {
1503 [TGSI_OPCODE_ARL] = 0,
1504 [TGSI_OPCODE_MOV] = nir_op_fmov,
1505 [TGSI_OPCODE_LIT] = 0,
1506 [TGSI_OPCODE_RCP] = nir_op_frcp,
1507 [TGSI_OPCODE_RSQ] = nir_op_frsq,
1508 [TGSI_OPCODE_EXP] = 0,
1509 [TGSI_OPCODE_LOG] = 0,
1510 [TGSI_OPCODE_MUL] = nir_op_fmul,
1511 [TGSI_OPCODE_ADD] = nir_op_fadd,
1512 [TGSI_OPCODE_DP3] = 0,
1513 [TGSI_OPCODE_DP4] = 0,
1514 [TGSI_OPCODE_DST] = 0,
1515 [TGSI_OPCODE_MIN] = nir_op_fmin,
1516 [TGSI_OPCODE_MAX] = nir_op_fmax,
1517 [TGSI_OPCODE_SLT] = nir_op_slt,
1518 [TGSI_OPCODE_SGE] = nir_op_sge,
1519 [TGSI_OPCODE_MAD] = nir_op_ffma,
1520 [TGSI_OPCODE_SUB] = nir_op_fsub,
1521 [TGSI_OPCODE_LRP] = 0,
1522 [TGSI_OPCODE_SQRT] = nir_op_fsqrt,
1523 [TGSI_OPCODE_DP2A] = 0,
1524 [TGSI_OPCODE_FRC] = nir_op_ffract,
1525 [TGSI_OPCODE_CLAMP] = 0,
1526 [TGSI_OPCODE_FLR] = nir_op_ffloor,
1527 [TGSI_OPCODE_ROUND] = nir_op_fround_even,
1528 [TGSI_OPCODE_EX2] = nir_op_fexp2,
1529 [TGSI_OPCODE_LG2] = nir_op_flog2,
1530 [TGSI_OPCODE_POW] = nir_op_fpow,
1531 [TGSI_OPCODE_XPD] = 0,
1532 [TGSI_OPCODE_ABS] = nir_op_fabs,
1533 [TGSI_OPCODE_DPH] = 0,
1534 [TGSI_OPCODE_COS] = nir_op_fcos,
1535 [TGSI_OPCODE_DDX] = nir_op_fddx,
1536 [TGSI_OPCODE_DDY] = nir_op_fddy,
1537 [TGSI_OPCODE_KILL] = 0,
1538 [TGSI_OPCODE_PK2H] = 0, /* XXX */
1539 [TGSI_OPCODE_PK2US] = 0, /* XXX */
1540 [TGSI_OPCODE_PK4B] = 0, /* XXX */
1541 [TGSI_OPCODE_PK4UB] = 0, /* XXX */
1542 [TGSI_OPCODE_SEQ] = nir_op_seq,
1543 [TGSI_OPCODE_SGT] = 0,
1544 [TGSI_OPCODE_SIN] = nir_op_fsin,
1545 [TGSI_OPCODE_SNE] = nir_op_sne,
1546 [TGSI_OPCODE_SLE] = 0,
1547 [TGSI_OPCODE_TEX] = 0,
1548 [TGSI_OPCODE_TXD] = 0,
1549 [TGSI_OPCODE_TXP] = 0,
1550 [TGSI_OPCODE_UP2H] = 0, /* XXX */
1551 [TGSI_OPCODE_UP2US] = 0, /* XXX */
1552 [TGSI_OPCODE_UP4B] = 0, /* XXX */
1553 [TGSI_OPCODE_UP4UB] = 0, /* XXX */
1554 [TGSI_OPCODE_ARR] = 0,
1555
1556 /* No function calls, yet. */
1557 [TGSI_OPCODE_CAL] = 0, /* XXX */
1558 [TGSI_OPCODE_RET] = 0, /* XXX */
1559
1560 [TGSI_OPCODE_SSG] = nir_op_fsign,
1561 [TGSI_OPCODE_CMP] = 0,
1562 [TGSI_OPCODE_SCS] = 0,
1563 [TGSI_OPCODE_TXB] = 0,
1564 [TGSI_OPCODE_DIV] = nir_op_fdiv,
1565 [TGSI_OPCODE_DP2] = 0,
1567 [TGSI_OPCODE_TXL] = 0,
1568
1569 [TGSI_OPCODE_BRK] = 0,
1570 [TGSI_OPCODE_IF] = 0,
1571 [TGSI_OPCODE_UIF] = 0,
1572 [TGSI_OPCODE_ELSE] = 0,
1573 [TGSI_OPCODE_ENDIF] = 0,
1574
1575 [TGSI_OPCODE_DDX_FINE] = nir_op_fddx_fine,
1576 [TGSI_OPCODE_DDY_FINE] = nir_op_fddy_fine,
1577
1578 [TGSI_OPCODE_PUSHA] = 0, /* XXX */
1579 [TGSI_OPCODE_POPA] = 0, /* XXX */
1580
1581 [TGSI_OPCODE_CEIL] = nir_op_fceil,
1582 [TGSI_OPCODE_I2F] = nir_op_i2f,
1583 [TGSI_OPCODE_NOT] = nir_op_inot,
1584 [TGSI_OPCODE_TRUNC] = nir_op_ftrunc,
1585 [TGSI_OPCODE_SHL] = nir_op_ishl,
1586 [TGSI_OPCODE_AND] = nir_op_iand,
1587 [TGSI_OPCODE_OR] = nir_op_ior,
1588 [TGSI_OPCODE_MOD] = nir_op_umod,
1589 [TGSI_OPCODE_XOR] = nir_op_ixor,
1590 [TGSI_OPCODE_SAD] = 0, /* XXX */
1591 [TGSI_OPCODE_TXF] = 0,
1592 [TGSI_OPCODE_TXQ] = 0,
1593
1594 [TGSI_OPCODE_CONT] = 0,
1595
1596 [TGSI_OPCODE_EMIT] = 0, /* XXX */
1597 [TGSI_OPCODE_ENDPRIM] = 0, /* XXX */
1598
1599 [TGSI_OPCODE_BGNLOOP] = 0,
1600 [TGSI_OPCODE_BGNSUB] = 0, /* XXX: no function calls */
1601 [TGSI_OPCODE_ENDLOOP] = 0,
1602 [TGSI_OPCODE_ENDSUB] = 0, /* XXX: no function calls */
1603
1604 [TGSI_OPCODE_TXQ_LZ] = 0,
1605 [TGSI_OPCODE_NOP] = 0,
1606 [TGSI_OPCODE_FSEQ] = nir_op_feq,
1607 [TGSI_OPCODE_FSGE] = nir_op_fge,
1608 [TGSI_OPCODE_FSLT] = nir_op_flt,
1609 [TGSI_OPCODE_FSNE] = nir_op_fne,
1610
1611 /* No control flow yet */
1612 [TGSI_OPCODE_CALLNZ] = 0, /* XXX */
1613 [TGSI_OPCODE_BREAKC] = 0, /* not emitted by glsl_to_tgsi.cpp */
1614
1615 [TGSI_OPCODE_KILL_IF] = 0,
1616
1617 [TGSI_OPCODE_END] = 0,
1618
1619 [TGSI_OPCODE_F2I] = nir_op_f2i,
1620 [TGSI_OPCODE_IDIV] = nir_op_idiv,
1621 [TGSI_OPCODE_IMAX] = nir_op_imax,
1622 [TGSI_OPCODE_IMIN] = nir_op_imin,
1623 [TGSI_OPCODE_INEG] = nir_op_ineg,
1624 [TGSI_OPCODE_ISGE] = nir_op_ige,
1625 [TGSI_OPCODE_ISHR] = nir_op_ishr,
1626 [TGSI_OPCODE_ISLT] = nir_op_ilt,
1627 [TGSI_OPCODE_F2U] = nir_op_f2u,
1628 [TGSI_OPCODE_U2F] = nir_op_u2f,
1629 [TGSI_OPCODE_UADD] = nir_op_iadd,
1630 [TGSI_OPCODE_UDIV] = nir_op_udiv,
1631 [TGSI_OPCODE_UMAD] = 0,
1632 [TGSI_OPCODE_UMAX] = nir_op_umax,
1633 [TGSI_OPCODE_UMIN] = nir_op_umin,
1634 [TGSI_OPCODE_UMOD] = nir_op_umod,
1635 [TGSI_OPCODE_UMUL] = nir_op_imul,
1636 [TGSI_OPCODE_USEQ] = nir_op_ieq,
1637 [TGSI_OPCODE_USGE] = nir_op_uge,
1638 [TGSI_OPCODE_USHR] = nir_op_ushr,
1639 [TGSI_OPCODE_USLT] = nir_op_ult,
1640 [TGSI_OPCODE_USNE] = nir_op_ine,
1641
1642 [TGSI_OPCODE_SWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1643 [TGSI_OPCODE_CASE] = 0, /* not emitted by glsl_to_tgsi.cpp */
1644 [TGSI_OPCODE_DEFAULT] = 0, /* not emitted by glsl_to_tgsi.cpp */
1645 [TGSI_OPCODE_ENDSWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1646
1647 /* XXX: SAMPLE opcodes */
1648
1649 [TGSI_OPCODE_UARL] = nir_op_imov,
1650 [TGSI_OPCODE_UCMP] = 0,
1651 [TGSI_OPCODE_IABS] = nir_op_iabs,
1652 [TGSI_OPCODE_ISSG] = nir_op_isign,
1653
1654 /* XXX: atomics */
1655
1656 [TGSI_OPCODE_TEX2] = 0,
1657 [TGSI_OPCODE_TXB2] = 0,
1658 [TGSI_OPCODE_TXL2] = 0,
1659
1660 [TGSI_OPCODE_IMUL_HI] = nir_op_imul_high,
1661 [TGSI_OPCODE_UMUL_HI] = nir_op_umul_high,
1662
1663 [TGSI_OPCODE_TG4] = 0,
1664 [TGSI_OPCODE_LODQ] = 0,
1665
1666 [TGSI_OPCODE_IBFE] = nir_op_ibitfield_extract,
1667 [TGSI_OPCODE_UBFE] = nir_op_ubitfield_extract,
1668 [TGSI_OPCODE_BFI] = nir_op_bitfield_insert,
1669 [TGSI_OPCODE_BREV] = nir_op_bitfield_reverse,
1670 [TGSI_OPCODE_POPC] = nir_op_bit_count,
1671 [TGSI_OPCODE_LSB] = nir_op_find_lsb,
1672 [TGSI_OPCODE_IMSB] = nir_op_ifind_msb,
1673 [TGSI_OPCODE_UMSB] = nir_op_ufind_msb,
1674
1675 [TGSI_OPCODE_INTERP_CENTROID] = 0, /* XXX */
1676 [TGSI_OPCODE_INTERP_SAMPLE] = 0, /* XXX */
1677 [TGSI_OPCODE_INTERP_OFFSET] = 0, /* XXX */
1678 };
1679
1680 static void
1681 ttn_emit_instruction(struct ttn_compile *c)
1682 {
1683 nir_builder *b = &c->build;
1684 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1685 unsigned i;
1686 unsigned tgsi_op = tgsi_inst->Instruction.Opcode;
1687 struct tgsi_full_dst_register *tgsi_dst = &tgsi_inst->Dst[0];
1688
1689 if (tgsi_op == TGSI_OPCODE_END)
1690 return;
1691
1692 nir_ssa_def *src[TGSI_FULL_MAX_SRC_REGISTERS];
1693 for (i = 0; i < tgsi_inst->Instruction.NumSrcRegs; i++) {
1694 src[i] = ttn_get_src(c, &tgsi_inst->Src[i]);
1695 }
1696 nir_alu_dest dest = ttn_get_dest(c, tgsi_dst);
1697
1698 switch (tgsi_op) {
1699 case TGSI_OPCODE_RSQ:
1700 ttn_move_dest(b, dest, nir_frsq(b, ttn_channel(b, src[0], X)));
1701 break;
1702
1703 case TGSI_OPCODE_SQRT:
1704 ttn_move_dest(b, dest, nir_fsqrt(b, ttn_channel(b, src[0], X)));
1705 break;
1706
1707 case TGSI_OPCODE_RCP:
1708 ttn_move_dest(b, dest, nir_frcp(b, ttn_channel(b, src[0], X)));
1709 break;
1710
1711 case TGSI_OPCODE_EX2:
1712 ttn_move_dest(b, dest, nir_fexp2(b, ttn_channel(b, src[0], X)));
1713 break;
1714
1715 case TGSI_OPCODE_LG2:
1716 ttn_move_dest(b, dest, nir_flog2(b, ttn_channel(b, src[0], X)));
1717 break;
1718
1719 case TGSI_OPCODE_POW:
1720 ttn_move_dest(b, dest, nir_fpow(b,
1721 ttn_channel(b, src[0], X),
1722 ttn_channel(b, src[1], X)));
1723 break;
1724
1725 case TGSI_OPCODE_COS:
1726 ttn_move_dest(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)));
1727 break;
1728
1729 case TGSI_OPCODE_SIN:
1730 ttn_move_dest(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)));
1731 break;
1732
1733 case TGSI_OPCODE_ARL:
1734 ttn_arl(b, op_trans[tgsi_op], dest, src);
1735 break;
1736
1737 case TGSI_OPCODE_EXP:
1738 ttn_exp(b, op_trans[tgsi_op], dest, src);
1739 break;
1740
1741 case TGSI_OPCODE_LOG:
1742 ttn_log(b, op_trans[tgsi_op], dest, src);
1743 break;
1744
1745 case TGSI_OPCODE_DST:
1746 ttn_dst(b, op_trans[tgsi_op], dest, src);
1747 break;
1748
1749 case TGSI_OPCODE_LIT:
1750 ttn_lit(b, op_trans[tgsi_op], dest, src);
1751 break;
1752
1753 case TGSI_OPCODE_CLAMP:
1754 ttn_clamp(b, op_trans[tgsi_op], dest, src);
1755 break;
1756
1757 case TGSI_OPCODE_XPD:
1758 ttn_xpd(b, op_trans[tgsi_op], dest, src);
1759 break;
1760
1761 case TGSI_OPCODE_DP2:
1762 ttn_dp2(b, op_trans[tgsi_op], dest, src);
1763 break;
1764
1765 case TGSI_OPCODE_DP3:
1766 ttn_dp3(b, op_trans[tgsi_op], dest, src);
1767 break;
1768
1769 case TGSI_OPCODE_DP4:
1770 ttn_dp4(b, op_trans[tgsi_op], dest, src);
1771 break;
1772
1773 case TGSI_OPCODE_DP2A:
1774 ttn_dp2a(b, op_trans[tgsi_op], dest, src);
1775 break;
1776
1777 case TGSI_OPCODE_DPH:
1778 ttn_dph(b, op_trans[tgsi_op], dest, src);
1779 break;
1780
1781 case TGSI_OPCODE_UMAD:
1782 ttn_umad(b, op_trans[tgsi_op], dest, src);
1783 break;
1784
1785 case TGSI_OPCODE_LRP:
1786 ttn_move_dest(b, dest, nir_flrp(b, src[2], src[1], src[0]));
1787 break;
1788
1789 case TGSI_OPCODE_KILL:
1790 ttn_kill(b, op_trans[tgsi_op], dest, src);
1791 break;
1792
1793 case TGSI_OPCODE_ARR:
1794 ttn_arr(b, op_trans[tgsi_op], dest, src);
1795 break;
1796
1797 case TGSI_OPCODE_CMP:
1798 ttn_cmp(b, op_trans[tgsi_op], dest, src);
1799 break;
1800
1801 case TGSI_OPCODE_UCMP:
1802 ttn_ucmp(b, op_trans[tgsi_op], dest, src);
1803 break;
1804
1805 case TGSI_OPCODE_SCS:
1806 ttn_scs(b, op_trans[tgsi_op], dest, src);
1807 break;
1808
1809 case TGSI_OPCODE_SGT:
1810 ttn_sgt(b, op_trans[tgsi_op], dest, src);
1811 break;
1812
1813 case TGSI_OPCODE_SLE:
1814 ttn_sle(b, op_trans[tgsi_op], dest, src);
1815 break;
1816
1817 case TGSI_OPCODE_KILL_IF:
1818 ttn_kill_if(b, op_trans[tgsi_op], dest, src);
1819 break;
1820
1821 case TGSI_OPCODE_TEX:
1822 case TGSI_OPCODE_TXP:
1823 case TGSI_OPCODE_TXL:
1824 case TGSI_OPCODE_TXB:
1825 case TGSI_OPCODE_TXD:
1826 case TGSI_OPCODE_TEX2:
1827 case TGSI_OPCODE_TXL2:
1828 case TGSI_OPCODE_TXB2:
1829 case TGSI_OPCODE_TXQ_LZ:
1830 case TGSI_OPCODE_TXF:
1831 case TGSI_OPCODE_TG4:
1832 case TGSI_OPCODE_LODQ:
1833 ttn_tex(c, dest, src);
1834 break;
1835
1836 case TGSI_OPCODE_TXQ:
1837 ttn_txq(c, dest, src);
1838 break;
1839
1840 case TGSI_OPCODE_NOP:
1841 break;
1842
1843 case TGSI_OPCODE_IF:
1844 ttn_if(c, src[0], false);
1845 break;
1846
1847 case TGSI_OPCODE_UIF:
1848 ttn_if(c, src[0], true);
1849 break;
1850
1851 case TGSI_OPCODE_ELSE:
1852 ttn_else(c);
1853 break;
1854
1855 case TGSI_OPCODE_ENDIF:
1856 ttn_endif(c);
1857 break;
1858
1859 case TGSI_OPCODE_BGNLOOP:
1860 ttn_bgnloop(c);
1861 break;
1862
1863 case TGSI_OPCODE_BRK:
1864 ttn_brk(b);
1865 break;
1866
1867 case TGSI_OPCODE_CONT:
1868 ttn_cont(b);
1869 break;
1870
1871 case TGSI_OPCODE_ENDLOOP:
1872 ttn_endloop(c);
1873 break;
1874
1875 default:
1876 if (op_trans[tgsi_op] != 0 || tgsi_op == TGSI_OPCODE_MOV) {
1877 ttn_alu(b, op_trans[tgsi_op], dest, src);
1878 } else {
1879 fprintf(stderr, "unknown TGSI opcode: %s\n",
1880 tgsi_get_opcode_name(tgsi_op));
1881 abort();
1882 }
1883 break;
1884 }
1885
1886 if (tgsi_inst->Instruction.Saturate) {
1887 assert(!dest.dest.is_ssa);
1888 ttn_move_dest(b, dest, nir_fsat(b, ttn_src_for_dest(b, &dest)));
1889 }
1890
1891    /* if the dst has a matching var, append store_var to move
1892 * output from reg to var
1893 */
1894 nir_variable *var = ttn_get_var(c, tgsi_dst);
1895 if (var) {
1896 unsigned index = tgsi_dst->Register.Index;
1897 unsigned offset = c->temp_regs[index].offset;
1898 nir_intrinsic_instr *store =
1899 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
1900 struct tgsi_ind_register *indirect = tgsi_dst->Register.Indirect ?
1901 &tgsi_dst->Indirect : NULL;
1902
1903 store->num_components = 4;
1904 store->variables[0] = ttn_array_deref(c, store, var, offset, indirect);
1905 store->src[0] = nir_src_for_reg(dest.dest.reg.reg);
1906
1907 nir_builder_instr_insert(b, &store->instr);
1908 }
1909 }
1910
1911 /**
1912  * Emits a store intrinsic for each TGSI_FILE_OUTPUT value to the output
1913 * variables at the end of the shader.
1914 *
1915 * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
1916 * written, because there's no output load intrinsic, which means we couldn't
1917 * handle writemasks.
1918 */
1919 static void
1920 ttn_add_output_stores(struct ttn_compile *c)
1921 {
1922 nir_builder *b = &c->build;
1923
1924 foreach_list_typed(nir_variable, var, node, &b->shader->outputs) {
1925 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1926 unsigned i;
1927
1928 for (i = 0; i < array_len; i++) {
1929 nir_intrinsic_instr *store =
1930 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_output);
1931 unsigned loc = var->data.driver_location + i;
1932 store->num_components = 4;
1933 store->src[0].reg.reg = c->output_regs[loc].reg;
1934 store->src[0].reg.base_offset = c->output_regs[loc].offset;
1935 store->const_index[0] = loc;
1936 store->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
1937 nir_builder_instr_insert(b, &store->instr);
1938 }
1939 }
1940 }
1941
1942 static gl_shader_stage
1943 tgsi_processor_to_shader_stage(unsigned processor)
1944 {
1945 switch (processor) {
1946 case TGSI_PROCESSOR_FRAGMENT: return MESA_SHADER_FRAGMENT;
1947 case TGSI_PROCESSOR_VERTEX: return MESA_SHADER_VERTEX;
1948 case TGSI_PROCESSOR_GEOMETRY: return MESA_SHADER_GEOMETRY;
1949 case TGSI_PROCESSOR_TESS_CTRL: return MESA_SHADER_TESS_CTRL;
1950 case TGSI_PROCESSOR_TESS_EVAL: return MESA_SHADER_TESS_EVAL;
1951 case TGSI_PROCESSOR_COMPUTE: return MESA_SHADER_COMPUTE;
1952 default:
1953 unreachable("invalid TGSI processor");
1954 };
1955 }
1956
1957 struct nir_shader *
1958 tgsi_to_nir(const void *tgsi_tokens,
1959 const nir_shader_compiler_options *options)
1960 {
1961 struct tgsi_parse_context parser;
1962 struct tgsi_shader_info scan;
1963 struct ttn_compile *c;
1964 struct nir_shader *s;
1965 int ret;
1966
1967 c = rzalloc(NULL, struct ttn_compile);
1968
1969 tgsi_scan_shader(tgsi_tokens, &scan);
1970 c->scan = &scan;
1971
1972 s = nir_shader_create(NULL, tgsi_processor_to_shader_stage(scan.processor),
1973 options);
1974
1975 nir_function *func = nir_function_create(s, "main");
1976 nir_function_overload *overload = nir_function_overload_create(func);
1977 nir_function_impl *impl = nir_function_impl_create(overload);
1978
1979 nir_builder_init(&c->build, impl);
1980 c->build.cursor = nir_after_cf_list(&impl->body);
1981
1982 s->num_inputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1983 s->num_uniforms = scan.const_file_max[0] + 1;
1984 s->num_outputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1985
1986 c->output_regs = rzalloc_array(c, struct ttn_reg_info,
1987 scan.file_max[TGSI_FILE_OUTPUT] + 1);
1988 c->temp_regs = rzalloc_array(c, struct ttn_reg_info,
1989 scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1990 c->imm_defs = rzalloc_array(c, nir_ssa_def *,
1991 scan.file_max[TGSI_FILE_IMMEDIATE] + 1);
1992
1993 c->num_samp_types = scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
1994 c->samp_types = rzalloc_array(c, nir_alu_type, c->num_samp_types);
1995
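   /* Each IF/UIF pushes two cursors in ttn_if() (the else branch and the
    * point after the ENDIF), hence the "* 2" when sizing if_stack below;
    * loops only need one saved cursor each.
    */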
1996 c->if_stack = rzalloc_array(c, nir_cursor,
1997 (scan.opcode_count[TGSI_OPCODE_IF] +
1998 scan.opcode_count[TGSI_OPCODE_UIF]) * 2);
1999 c->loop_stack = rzalloc_array(c, nir_cursor,
2000 scan.opcode_count[TGSI_OPCODE_BGNLOOP]);
2001
2002 ret = tgsi_parse_init(&parser, tgsi_tokens);
2003 assert(ret == TGSI_PARSE_OK);
2004
2005 while (!tgsi_parse_end_of_tokens(&parser)) {
2006 tgsi_parse_token(&parser);
2007 c->token = &parser.FullToken;
2008
2009 switch (parser.FullToken.Token.Type) {
2010 case TGSI_TOKEN_TYPE_DECLARATION:
2011 ttn_emit_declaration(c);
2012 break;
2013
2014 case TGSI_TOKEN_TYPE_INSTRUCTION:
2015 ttn_emit_instruction(c);
2016 break;
2017
2018 case TGSI_TOKEN_TYPE_IMMEDIATE:
2019 ttn_emit_immediate(c);
2020 break;
2021 }
2022 }
2023
2024 tgsi_parse_free(&parser);
2025
2026 ttn_add_output_stores(c);
2027
2028 ralloc_free(c);
2029 return s;
2030 }