1 /*
2 * Copyright © 2014-2015 Broadcom
3 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "util/ralloc.h"
26 #include "glsl/nir/nir.h"
27 #include "glsl/nir/nir_control_flow.h"
28 #include "glsl/nir/nir_builder.h"
29 #include "glsl/list.h"
30 #include "glsl/nir/shader_enums.h"
31
32 #include "nir/tgsi_to_nir.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "tgsi/tgsi_dump.h"
35 #include "tgsi/tgsi_info.h"
36 #include "tgsi/tgsi_scan.h"
37
38 #define SWIZ(X, Y, Z, W) (unsigned[4]){ \
39 TGSI_SWIZZLE_##X, \
40 TGSI_SWIZZLE_##Y, \
41 TGSI_SWIZZLE_##Z, \
42 TGSI_SWIZZLE_##W, \
43 }
44
45 struct ttn_reg_info {
46 /** nir register containing this TGSI index. */
47 nir_register *reg;
48 nir_variable *var;
49 /** Offset (in vec4s) from the start of var for this TGSI index. */
50 int offset;
51 };
52
53 struct ttn_compile {
54 union tgsi_full_token *token;
55 nir_builder build;
56 struct tgsi_shader_info *scan;
57
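   /* Per-index maps from TGSI OUTPUT and TEMPORARY registers to the NIR
    * register or variable backing them, and from IMMEDIATE slots to the
    * load_const SSA defs holding their values.
    */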
58 struct ttn_reg_info *output_regs;
59 struct ttn_reg_info *temp_regs;
60 nir_ssa_def **imm_defs;
61
62 unsigned num_samp_types;
63 nir_alu_type *samp_types;
64
65 nir_register *addr_reg;
66
67 /**
68 * Stack of nir_cursors where instructions should be pushed as we pop
69 * back out of the control flow stack.
70 *
71 * For each IF/ELSE/ENDIF block, if_stack[if_stack_pos - 1] has where the else
72 * instructions should be placed, and if_stack[if_stack_pos - 2] has where
73 * the next instructions outside of the if/then/else block go.
74 */
75 nir_cursor *if_stack;
76 unsigned if_stack_pos;
77
78 /**
79 * Stack of nir_cursors where instructions should be pushed as we pop
80 * back out of the control flow stack.
81 *
82 * loop_stack[loop_stack_pos - 1] contains the cf_node_list for the outside
83 * of the loop.
84 */
85 nir_cursor *loop_stack;
86 unsigned loop_stack_pos;
87
88 /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
89 unsigned next_imm;
90 };
91
92 #define ttn_swizzle(b, src, x, y, z, w) \
93 nir_swizzle(b, src, SWIZ(x, y, z, w), 4, false)
94 #define ttn_channel(b, src, swiz) \
95 nir_swizzle(b, src, SWIZ(swiz, swiz, swiz, swiz), 1, false)
96
97 static gl_varying_slot
98 tgsi_varying_semantic_to_slot(unsigned semantic, unsigned index)
99 {
100 switch (semantic) {
101 case TGSI_SEMANTIC_POSITION:
102 return VARYING_SLOT_POS;
103 case TGSI_SEMANTIC_COLOR:
104 if (index == 0)
105 return VARYING_SLOT_COL0;
106 else
107 return VARYING_SLOT_COL1;
108 case TGSI_SEMANTIC_BCOLOR:
109 if (index == 0)
110 return VARYING_SLOT_BFC0;
111 else
112 return VARYING_SLOT_BFC1;
113 case TGSI_SEMANTIC_FOG:
114 return VARYING_SLOT_FOGC;
115 case TGSI_SEMANTIC_PSIZE:
116 return VARYING_SLOT_PSIZ;
117 case TGSI_SEMANTIC_GENERIC:
118 return VARYING_SLOT_VAR0 + index;
119 case TGSI_SEMANTIC_FACE:
120 return VARYING_SLOT_FACE;
121 case TGSI_SEMANTIC_EDGEFLAG:
122 return VARYING_SLOT_EDGE;
123 case TGSI_SEMANTIC_PRIMID:
124 return VARYING_SLOT_PRIMITIVE_ID;
125 case TGSI_SEMANTIC_CLIPDIST:
126 if (index == 0)
127 return VARYING_SLOT_CLIP_DIST0;
128 else
129 return VARYING_SLOT_CLIP_DIST1;
130 case TGSI_SEMANTIC_CLIPVERTEX:
131 return VARYING_SLOT_CLIP_VERTEX;
132 case TGSI_SEMANTIC_TEXCOORD:
133 return VARYING_SLOT_TEX0 + index;
134 case TGSI_SEMANTIC_PCOORD:
135 return VARYING_SLOT_PNTC;
136 case TGSI_SEMANTIC_VIEWPORT_INDEX:
137 return VARYING_SLOT_VIEWPORT;
138 case TGSI_SEMANTIC_LAYER:
139 return VARYING_SLOT_LAYER;
140 default:
141 fprintf(stderr, "Bad TGSI semantic: %d/%d\n", semantic, index);
142 abort();
143 }
144 }
145
146 /* Temporary helper to remap back to TGSI style semantic name/index
147 * values, for use in drivers that haven't been converted to using
148 * VARYING_SLOT_
149 */
150 void
151 varying_slot_to_tgsi_semantic(gl_varying_slot slot,
152 unsigned *semantic_name, unsigned *semantic_index)
153 {
154 static const unsigned map[][2] = {
155 [VARYING_SLOT_POS] = { TGSI_SEMANTIC_POSITION, 0 },
156 [VARYING_SLOT_COL0] = { TGSI_SEMANTIC_COLOR, 0 },
157 [VARYING_SLOT_COL1] = { TGSI_SEMANTIC_COLOR, 1 },
158 [VARYING_SLOT_BFC0] = { TGSI_SEMANTIC_BCOLOR, 0 },
159 [VARYING_SLOT_BFC1] = { TGSI_SEMANTIC_BCOLOR, 1 },
160 [VARYING_SLOT_FOGC] = { TGSI_SEMANTIC_FOG, 0 },
161 [VARYING_SLOT_PSIZ] = { TGSI_SEMANTIC_PSIZE, 0 },
162 [VARYING_SLOT_FACE] = { TGSI_SEMANTIC_FACE, 0 },
163 [VARYING_SLOT_EDGE] = { TGSI_SEMANTIC_EDGEFLAG, 0 },
164 [VARYING_SLOT_PRIMITIVE_ID] = { TGSI_SEMANTIC_PRIMID, 0 },
165 [VARYING_SLOT_CLIP_DIST0] = { TGSI_SEMANTIC_CLIPDIST, 0 },
166 [VARYING_SLOT_CLIP_DIST1] = { TGSI_SEMANTIC_CLIPDIST, 1 },
167 [VARYING_SLOT_CLIP_VERTEX] = { TGSI_SEMANTIC_CLIPVERTEX, 0 },
168 [VARYING_SLOT_PNTC] = { TGSI_SEMANTIC_PCOORD, 0 },
169 [VARYING_SLOT_VIEWPORT] = { TGSI_SEMANTIC_VIEWPORT_INDEX, 0 },
170 [VARYING_SLOT_LAYER] = { TGSI_SEMANTIC_LAYER, 0 },
171 };
172
173 if (slot >= VARYING_SLOT_VAR0) {
174 *semantic_name = TGSI_SEMANTIC_GENERIC;
175 *semantic_index = slot - VARYING_SLOT_VAR0;
176 return;
177 }
178
179 if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7) {
180 *semantic_name = TGSI_SEMANTIC_TEXCOORD;
181 *semantic_index = slot - VARYING_SLOT_TEX0;
182 return;
183 }
184
185 if (slot >= ARRAY_SIZE(map)) {
186 fprintf(stderr, "Unknown varying slot %d\n", slot);
187 abort();
188 }
189
190 *semantic_name = map[slot][0];
191 *semantic_index = map[slot][1];
192 }
193
194 /* Temporary helper to remap back to TGSI style semantic name/index
195 * values, for use in drivers that haven't been converted to using
196 * FRAG_RESULT_
197 */
198 void
199 frag_result_to_tgsi_semantic(gl_frag_result slot,
200 unsigned *semantic_name, unsigned *semantic_index)
201 {
202 static const unsigned map[][2] = {
203 [FRAG_RESULT_DEPTH] = { TGSI_SEMANTIC_POSITION, 0 },
204 [FRAG_RESULT_COLOR] = { TGSI_SEMANTIC_COLOR, -1 },
205 [FRAG_RESULT_DATA0 + 0] = { TGSI_SEMANTIC_COLOR, 0 },
206 [FRAG_RESULT_DATA0 + 1] = { TGSI_SEMANTIC_COLOR, 1 },
207 [FRAG_RESULT_DATA0 + 2] = { TGSI_SEMANTIC_COLOR, 2 },
208 [FRAG_RESULT_DATA0 + 3] = { TGSI_SEMANTIC_COLOR, 3 },
209 [FRAG_RESULT_DATA0 + 4] = { TGSI_SEMANTIC_COLOR, 4 },
210 [FRAG_RESULT_DATA0 + 5] = { TGSI_SEMANTIC_COLOR, 5 },
211 [FRAG_RESULT_DATA0 + 6] = { TGSI_SEMANTIC_COLOR, 6 },
212 [FRAG_RESULT_DATA0 + 7] = { TGSI_SEMANTIC_COLOR, 7 },
213 };
214
215 *semantic_name = map[slot][0];
216 *semantic_index = map[slot][1];
217 }
218
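/* Re-reads the value just written to an ALU dest (whether SSA or a NIR
 * register) as a fresh vec4 SSA def.  Used when TGSI saturate has to be
 * applied on top of a result that has already been stored.
 */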
219 static nir_ssa_def *
220 ttn_src_for_dest(nir_builder *b, nir_alu_dest *dest)
221 {
222 nir_alu_src src;
223 memset(&src, 0, sizeof(src));
224
225 if (dest->dest.is_ssa)
226 src.src = nir_src_for_ssa(&dest->dest.ssa);
227 else {
228 assert(!dest->dest.reg.indirect);
229 src.src = nir_src_for_reg(dest->dest.reg.reg);
230 src.src.reg.base_offset = dest->dest.reg.base_offset;
231 }
232
233 for (int i = 0; i < 4; i++)
234 src.swizzle[i] = i;
235
236 return nir_fmov_alu(b, src, 4);
237 }
238
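/* Translates one TGSI declaration token: temporaries become NIR registers
 * (or a single array variable for declared arrays), the address file gets a
 * register, sampler views record their return type for later texture ops,
 * and inputs/outputs/constants become nir_variables.  Outputs additionally
 * get a staging register that is flushed in ttn_add_output_stores().
 */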
239 static void
240 ttn_emit_declaration(struct ttn_compile *c)
241 {
242 nir_builder *b = &c->build;
243 struct tgsi_full_declaration *decl = &c->token->FullDeclaration;
244 unsigned array_size = decl->Range.Last - decl->Range.First + 1;
245 unsigned file = decl->Declaration.File;
246 unsigned i;
247
248 if (file == TGSI_FILE_TEMPORARY) {
249 if (decl->Declaration.Array) {
250 /* for arrays, we create variables instead of registers: */
251 nir_variable *var = rzalloc(b->shader, nir_variable);
252
253 var->type = glsl_array_type(glsl_vec4_type(), array_size);
254 var->data.mode = nir_var_global;
255 var->name = ralloc_asprintf(var, "arr_%d", decl->Array.ArrayID);
256
257 exec_list_push_tail(&b->shader->globals, &var->node);
258
259 for (i = 0; i < array_size; i++) {
260 /* point all the matching slots to the same var,
261 * with appropriate offset set, mostly just so
262 * we know what to do when tgsi does a non-indirect
263 * access
264 */
265 c->temp_regs[decl->Range.First + i].reg = NULL;
266 c->temp_regs[decl->Range.First + i].var = var;
267 c->temp_regs[decl->Range.First + i].offset = i;
268 }
269 } else {
270 for (i = 0; i < array_size; i++) {
271 nir_register *reg = nir_local_reg_create(b->impl);
272 reg->num_components = 4;
273 c->temp_regs[decl->Range.First + i].reg = reg;
274 c->temp_regs[decl->Range.First + i].var = NULL;
275 c->temp_regs[decl->Range.First + i].offset = 0;
276 }
277 }
278 } else if (file == TGSI_FILE_ADDRESS) {
279 c->addr_reg = nir_local_reg_create(b->impl);
280 c->addr_reg->num_components = 4;
281 } else if (file == TGSI_FILE_SYSTEM_VALUE) {
282 /* Nothing to record for system values. */
283 } else if (file == TGSI_FILE_SAMPLER) {
284 /* Nothing to record for samplers. */
285 } else if (file == TGSI_FILE_SAMPLER_VIEW) {
286 struct tgsi_declaration_sampler_view *sview = &decl->SamplerView;
287 nir_alu_type type;
288
289 assert((sview->ReturnTypeX == sview->ReturnTypeY) &&
290 (sview->ReturnTypeX == sview->ReturnTypeZ) &&
291 (sview->ReturnTypeX == sview->ReturnTypeW));
292
293 switch (sview->ReturnTypeX) {
294 case TGSI_RETURN_TYPE_SINT:
295 type = nir_type_int;
296 break;
297 case TGSI_RETURN_TYPE_UINT:
298 type = nir_type_uint;
299 break;
300 case TGSI_RETURN_TYPE_FLOAT:
301 default:
302 type = nir_type_float;
303 break;
304 }
305
306 for (i = 0; i < array_size; i++) {
307 c->samp_types[decl->Range.First + i] = type;
308 }
309 } else {
310 bool is_array = (array_size > 1);
311
312 assert(file == TGSI_FILE_INPUT ||
313 file == TGSI_FILE_OUTPUT ||
314 file == TGSI_FILE_CONSTANT);
315
316 /* nothing to do for UBOs: */
317 if ((file == TGSI_FILE_CONSTANT) && decl->Declaration.Dimension)
318 return;
319
320 if ((file == TGSI_FILE_INPUT) || (file == TGSI_FILE_OUTPUT)) {
321 is_array = (is_array && decl->Declaration.Array &&
322 (decl->Array.ArrayID != 0));
323 }
324
325 for (i = 0; i < array_size; i++) {
326 unsigned idx = decl->Range.First + i;
327 nir_variable *var = rzalloc(b->shader, nir_variable);
328
329 var->data.driver_location = idx;
330
331 var->type = glsl_vec4_type();
332 if (is_array)
333 var->type = glsl_array_type(var->type, array_size);
334
335 switch (file) {
336 case TGSI_FILE_INPUT:
337 var->data.read_only = true;
338 var->data.mode = nir_var_shader_in;
339 var->name = ralloc_asprintf(var, "in_%d", idx);
340
341 if (c->scan->processor == TGSI_PROCESSOR_FRAGMENT) {
342 var->data.location =
343 tgsi_varying_semantic_to_slot(decl->Semantic.Name,
344 decl->Semantic.Index);
345 } else {
346 assert(!decl->Declaration.Semantic);
347 var->data.location = VERT_ATTRIB_GENERIC0 + idx;
348 }
349 var->data.index = 0;
350
351 /* We definitely need to translate the interpolation field, because
352 * nir_print will decode it.
353 */
354 switch (decl->Interp.Interpolate) {
355 case TGSI_INTERPOLATE_CONSTANT:
356 var->data.interpolation = INTERP_QUALIFIER_FLAT;
357 break;
358 case TGSI_INTERPOLATE_LINEAR:
359 var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
360 break;
361 case TGSI_INTERPOLATE_PERSPECTIVE:
362 var->data.interpolation = INTERP_QUALIFIER_SMOOTH;
363 break;
364 }
365
366 exec_list_push_tail(&b->shader->inputs, &var->node);
367 break;
368 case TGSI_FILE_OUTPUT: {
369 int semantic_name = decl->Semantic.Name;
370 int semantic_index = decl->Semantic.Index;
371 /* Since we can't load from outputs in the IR, we make temporaries
372 * for the outputs and emit stores to the real outputs at the end of
373 * the shader.
374 */
375 nir_register *reg = nir_local_reg_create(b->impl);
376 reg->num_components = 4;
377 if (is_array)
378 reg->num_array_elems = array_size;
379
380 var->data.mode = nir_var_shader_out;
381 var->name = ralloc_asprintf(var, "out_%d", idx);
382 var->data.index = 0;
383
384 if (c->scan->processor == TGSI_PROCESSOR_FRAGMENT) {
385 switch (semantic_name) {
386 case TGSI_SEMANTIC_COLOR: {
387 /* TODO tgsi loses some information, so we cannot
388 * actually differentiate here between DSB and MRT
389 * at this point. But so far no drivers using tgsi-
390 * to-nir support dual source blend:
391 */
392 bool dual_src_blend = false;
393 if (dual_src_blend && (semantic_index == 1)) {
394 var->data.location = FRAG_RESULT_DATA0;
395 var->data.index = 1;
396 } else {
397 if (c->scan->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
398 var->data.location = FRAG_RESULT_COLOR;
399 else
400 var->data.location = FRAG_RESULT_DATA0 + semantic_index;
401 }
402 break;
403 }
404 case TGSI_SEMANTIC_POSITION:
405 var->data.location = FRAG_RESULT_DEPTH;
406 break;
407 default:
408 fprintf(stderr, "Bad TGSI semantic: %d/%d\n",
409 decl->Semantic.Name, decl->Semantic.Index);
410 abort();
411 }
412 } else {
413 var->data.location =
414 tgsi_varying_semantic_to_slot(semantic_name, semantic_index);
415 }
416
417 if (is_array) {
418 unsigned j;
419 for (j = 0; j < array_size; j++) {
420 c->output_regs[idx + j].offset = i + j;
421 c->output_regs[idx + j].reg = reg;
422 }
423 } else {
424 c->output_regs[idx].offset = i;
425 c->output_regs[idx].reg = reg;
426 }
427
428 exec_list_push_tail(&b->shader->outputs, &var->node);
429 }
430 break;
431 case TGSI_FILE_CONSTANT:
432 var->data.mode = nir_var_uniform;
433 var->name = ralloc_asprintf(var, "uniform_%d", idx);
434
435 exec_list_push_tail(&b->shader->uniforms, &var->node);
436 break;
437 default:
438 unreachable("bad declaration file");
439 return;
440 }
441
442 if (is_array)
443 break;
444 }
445
446 }
447 }
448
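/* Each TGSI immediate becomes a vec4 load_const; its SSA def is remembered
 * in imm_defs so later TGSI_FILE_IMMEDIATE sources can reference it
 * directly.
 */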
449 static void
450 ttn_emit_immediate(struct ttn_compile *c)
451 {
452 nir_builder *b = &c->build;
453 struct tgsi_full_immediate *tgsi_imm = &c->token->FullImmediate;
454 nir_load_const_instr *load_const;
455 int i;
456
457 load_const = nir_load_const_instr_create(b->shader, 4);
458 c->imm_defs[c->next_imm] = &load_const->def;
459 c->next_imm++;
460
461 for (i = 0; i < 4; i++)
462 load_const->value.u[i] = tgsi_imm->u[i].Uint;
463
464 nir_builder_instr_insert(b, &load_const->instr);
465 }
466
467 static nir_ssa_def *
468 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect);
469
470 /* generate either a constant or indirect deref chain for accessing an
471 * array variable.
472 */
473 static nir_deref_var *
474 ttn_array_deref(struct ttn_compile *c, nir_intrinsic_instr *instr,
475 nir_variable *var, unsigned offset,
476 struct tgsi_ind_register *indirect)
477 {
478 nir_deref_var *deref = nir_deref_var_create(instr, var);
479 nir_deref_array *arr = nir_deref_array_create(deref);
480
481 arr->base_offset = offset;
482 arr->deref.type = glsl_get_array_element(var->type);
483
484 if (indirect) {
485 arr->deref_array_type = nir_deref_array_type_indirect;
486 arr->indirect = nir_src_for_ssa(ttn_src_for_indirect(c, indirect));
487 } else {
488 arr->deref_array_type = nir_deref_array_type_direct;
489 }
490
491 deref->deref.child = &arr->deref;
492
493 return deref;
494 }
495
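/* Builds a nir_src for a TGSI register file/index pair.  Temporaries and
 * the address register become plain register references (or a load_var for
 * array temporaries), while inputs, constants/UBOs and system values are
 * fetched with the corresponding load intrinsics.
 */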
496 static nir_src
497 ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
498 struct tgsi_ind_register *indirect,
499 struct tgsi_dimension *dim,
500 struct tgsi_ind_register *dimind)
501 {
502 nir_builder *b = &c->build;
503 nir_src src;
504
505 memset(&src, 0, sizeof(src));
506
507 switch (file) {
508 case TGSI_FILE_TEMPORARY:
509 if (c->temp_regs[index].var) {
510 unsigned offset = c->temp_regs[index].offset;
511 nir_variable *var = c->temp_regs[index].var;
512 nir_intrinsic_instr *load;
513
514 load = nir_intrinsic_instr_create(b->shader,
515 nir_intrinsic_load_var);
516 load->num_components = 4;
517 load->variables[0] = ttn_array_deref(c, load, var, offset, indirect);
518
519 nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
520 nir_builder_instr_insert(b, &load->instr);
521
522 src = nir_src_for_ssa(&load->dest.ssa);
523
524 } else {
525 assert(!indirect);
526 src.reg.reg = c->temp_regs[index].reg;
527 }
528 assert(!dim);
529 break;
530
531 case TGSI_FILE_ADDRESS:
532 src.reg.reg = c->addr_reg;
533 assert(!dim);
534 break;
535
536 case TGSI_FILE_IMMEDIATE:
537 src = nir_src_for_ssa(c->imm_defs[index]);
538 assert(!indirect);
539 assert(!dim);
540 break;
541
542 case TGSI_FILE_SYSTEM_VALUE: {
543 nir_intrinsic_instr *load;
544 nir_intrinsic_op op;
545 unsigned ncomp = 1;
546
547 assert(!indirect);
548 assert(!dim);
549
550 switch (c->scan->system_value_semantic_name[index]) {
551 case TGSI_SEMANTIC_VERTEXID_NOBASE:
552 op = nir_intrinsic_load_vertex_id_zero_base;
553 break;
554 case TGSI_SEMANTIC_VERTEXID:
555 op = nir_intrinsic_load_vertex_id;
556 break;
557 case TGSI_SEMANTIC_BASEVERTEX:
558 op = nir_intrinsic_load_base_vertex;
559 break;
560 case TGSI_SEMANTIC_INSTANCEID:
561 op = nir_intrinsic_load_instance_id;
562 break;
563 default:
564 unreachable("bad system value");
565 }
566
567 load = nir_intrinsic_instr_create(b->shader, op);
568 load->num_components = ncomp;
569
570 nir_ssa_dest_init(&load->instr, &load->dest, ncomp, NULL);
571 nir_builder_instr_insert(b, &load->instr);
572
573 src = nir_src_for_ssa(&load->dest.ssa);
574 break;
575 }
576
577 case TGSI_FILE_INPUT:
578 case TGSI_FILE_CONSTANT: {
579 nir_intrinsic_instr *load;
580 nir_intrinsic_op op;
581 unsigned srcn = 0;
582
583 switch (file) {
584 case TGSI_FILE_INPUT:
585 op = nir_intrinsic_load_input;
586 assert(!dim);
587 break;
588 case TGSI_FILE_CONSTANT:
589 if (dim) {
590 op = nir_intrinsic_load_ubo;
591 } else {
592 op = nir_intrinsic_load_uniform;
593 }
594 break;
595 default:
596 unreachable("No other load files supported");
597 break;
598 }
599
600 load = nir_intrinsic_instr_create(b->shader, op);
601
602 load->num_components = 4;
603 if (dim) {
604 if (dimind) {
605 load->src[srcn] =
606 ttn_src_for_file_and_index(c, dimind->File, dimind->Index,
607 NULL, NULL, NULL);
608 } else {
609 /* UBOs start at index 1 in TGSI: */
610 load->src[srcn] =
611 nir_src_for_ssa(nir_imm_int(b, dim->Index - 1));
612 }
613 srcn++;
614 }
615
616 nir_ssa_def *offset;
617 if (dim) {
618 /* UBO loads don't have a const_index[0] base offset. */
619 offset = nir_imm_int(b, index);
620 if (indirect) {
621 offset = nir_iadd(b, offset, ttn_src_for_indirect(c, indirect));
622 }
623 /* UBO offsets are in bytes, but TGSI gives them to us in vec4's */
624 offset = nir_ishl(b, offset, nir_imm_int(b, 4));
625 } else {
626 load->const_index[0] = index;
627 if (indirect) {
628 offset = ttn_src_for_indirect(c, indirect);
629 } else {
630 offset = nir_imm_int(b, 0);
631 }
632 }
633 load->src[srcn++] = nir_src_for_ssa(offset);
634
635 nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
636 nir_builder_instr_insert(b, &load->instr);
637
638 src = nir_src_for_ssa(&load->dest.ssa);
639 break;
640 }
641
642 default:
643 unreachable("bad src file");
644 }
645
646
647 return src;
648 }
649
650 static nir_ssa_def *
651 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect)
652 {
653 nir_builder *b = &c->build;
654 nir_alu_src src;
655 memset(&src, 0, sizeof(src));
656 for (int i = 0; i < 4; i++)
657 src.swizzle[i] = indirect->Swizzle;
658 src.src = ttn_src_for_file_and_index(c,
659 indirect->File,
660 indirect->Index,
661 NULL, NULL, NULL);
662 return nir_imov_alu(b, src, 1);
663 }
664
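/* Translates a TGSI destination into a nir_alu_dest, including the
 * writemask.  Array temporaries get a one-shot scratch register that is
 * preloaded from the array variable here and stored back at the end of
 * ttn_emit_instruction().
 */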
665 static nir_alu_dest
666 ttn_get_dest(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
667 {
668 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
669 nir_alu_dest dest;
670 unsigned index = tgsi_dst->Index;
671
672 memset(&dest, 0, sizeof(dest));
673
674 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
675 if (c->temp_regs[index].var) {
676 nir_builder *b = &c->build;
677 nir_intrinsic_instr *load;
678 struct tgsi_ind_register *indirect =
679 tgsi_dst->Indirect ? &tgsi_fdst->Indirect : NULL;
680 nir_register *reg;
681
682 /* this works, because TGSI will give us a base offset
683 * (in case of indirect index) that points back into
684 * the array. Access can be direct or indirect, we
685 * don't really care. Just create a one-shot dst reg
686 * that will get store_var'd back into the array var
687 * at the end of ttn_emit_instruction()
688 */
689 reg = nir_local_reg_create(c->build.impl);
690 reg->num_components = 4;
691 dest.dest.reg.reg = reg;
692 dest.dest.reg.base_offset = 0;
693
694 /* since the alu op might not write to all components
695 * of the temporary, we must first do a load_var to
696 * get the previous array elements into the register.
697 * This is one area that NIR could use a bit of
698 * improvement (or opt pass to clean up the mess
699 * once things are scalarized)
700 */
701
702 load = nir_intrinsic_instr_create(c->build.shader,
703 nir_intrinsic_load_var);
704 load->num_components = 4;
705 load->variables[0] =
706 ttn_array_deref(c, load, c->temp_regs[index].var,
707 c->temp_regs[index].offset,
708 indirect);
709
710 load->dest = nir_dest_for_reg(reg);
711
712 nir_builder_instr_insert(b, &load->instr);
713 } else {
714 assert(!tgsi_dst->Indirect);
715 dest.dest.reg.reg = c->temp_regs[index].reg;
716 dest.dest.reg.base_offset = c->temp_regs[index].offset;
717 }
718 } else if (tgsi_dst->File == TGSI_FILE_OUTPUT) {
719 dest.dest.reg.reg = c->output_regs[index].reg;
720 dest.dest.reg.base_offset = c->output_regs[index].offset;
721 } else if (tgsi_dst->File == TGSI_FILE_ADDRESS) {
722 assert(index == 0);
723 dest.dest.reg.reg = c->addr_reg;
724 }
725
726 dest.write_mask = tgsi_dst->WriteMask;
727 dest.saturate = false;
728
729 if (tgsi_dst->Indirect && (tgsi_dst->File != TGSI_FILE_TEMPORARY)) {
730 nir_src *indirect = ralloc(c->build.shader, nir_src);
731 *indirect = nir_src_for_ssa(ttn_src_for_indirect(c, &tgsi_fdst->Indirect));
732 dest.dest.reg.indirect = indirect;
733 }
734
735 return dest;
736 }
737
738 static nir_variable *
739 ttn_get_var(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
740 {
741 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
742 unsigned index = tgsi_dst->Index;
743
744 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
745 /* we should not have an indirect when there is no var! */
746 if (!c->temp_regs[index].var)
747 assert(!tgsi_dst->Indirect);
748 return c->temp_regs[index].var;
749 }
750
751 return NULL;
752 }
753
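/* Returns the vec4 SSA value for a TGSI source operand, with its swizzle,
 * absolute-value and negate modifiers applied.  Sampler sources return NULL,
 * since texture instructions look up the sampler index themselves.
 */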
754 static nir_ssa_def *
755 ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc)
756 {
757 nir_builder *b = &c->build;
758 struct tgsi_src_register *tgsi_src = &tgsi_fsrc->Register;
759 unsigned tgsi_opcode = c->token->FullInstruction.Instruction.Opcode;
760 unsigned tgsi_src_type = tgsi_opcode_infer_src_type(tgsi_opcode);
761 bool src_is_float = !(tgsi_src_type == TGSI_TYPE_SIGNED ||
762 tgsi_src_type == TGSI_TYPE_UNSIGNED);
763 nir_alu_src src;
764
765 memset(&src, 0, sizeof(src));
766
767 if (tgsi_src->File == TGSI_FILE_NULL) {
768 return nir_imm_float(b, 0.0);
769 } else if (tgsi_src->File == TGSI_FILE_SAMPLER) {
770 /* Only the index of the sampler gets used in texturing, and it will
771 * handle looking that up on its own instead of using the nir_alu_src.
772 */
773 assert(!tgsi_src->Indirect);
774 return NULL;
775 } else {
776 struct tgsi_ind_register *ind = NULL;
777 struct tgsi_dimension *dim = NULL;
778 struct tgsi_ind_register *dimind = NULL;
779 if (tgsi_src->Indirect)
780 ind = &tgsi_fsrc->Indirect;
781 if (tgsi_src->Dimension) {
782 dim = &tgsi_fsrc->Dimension;
783 if (dim->Indirect)
784 dimind = &tgsi_fsrc->DimIndirect;
785 }
786 src.src = ttn_src_for_file_and_index(c,
787 tgsi_src->File,
788 tgsi_src->Index,
789 ind, dim, dimind);
790 }
791
792 src.swizzle[0] = tgsi_src->SwizzleX;
793 src.swizzle[1] = tgsi_src->SwizzleY;
794 src.swizzle[2] = tgsi_src->SwizzleZ;
795 src.swizzle[3] = tgsi_src->SwizzleW;
796
797 nir_ssa_def *def = nir_fmov_alu(b, src, 4);
798
799 if (tgsi_src->Absolute) {
800 if (src_is_float)
801 def = nir_fabs(b, def);
802 else
803 def = nir_iabs(b, def);
804 }
805
806 if (tgsi_src->Negate) {
807 if (src_is_float)
808 def = nir_fneg(b, def);
809 else
810 def = nir_ineg(b, def);
811 }
812
813 return def;
814 }
815
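/* Generic path for opcodes that map 1:1 onto a NIR ALU op via op_trans. */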
816 static void
817 ttn_alu(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
818 {
819 unsigned num_srcs = nir_op_infos[op].num_inputs;
820 nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
821 unsigned i;
822
823 for (i = 0; i < num_srcs; i++)
824 instr->src[i].src = nir_src_for_ssa(src[i]);
825
826 instr->dest = dest;
827 nir_builder_instr_insert(b, &instr->instr);
828 }
829
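/* Moves def into dest, restricted to write_mask.  Defs with fewer than four
 * components have their last component replicated into the missing swizzle
 * slots so the mov always behaves as a full vec4 operation.
 */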
830 static void
831 ttn_move_dest_masked(nir_builder *b, nir_alu_dest dest,
832 nir_ssa_def *def, unsigned write_mask)
833 {
834 if (!(dest.write_mask & write_mask))
835 return;
836
837 nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_imov);
838 mov->dest = dest;
839 mov->dest.write_mask &= write_mask;
840 mov->src[0].src = nir_src_for_ssa(def);
841 for (unsigned i = def->num_components; i < 4; i++)
842 mov->src[0].swizzle[i] = def->num_components - 1;
843 nir_builder_instr_insert(b, &mov->instr);
844 }
845
846 static void
847 ttn_move_dest(nir_builder *b, nir_alu_dest dest, nir_ssa_def *def)
848 {
849 ttn_move_dest_masked(b, dest, def, TGSI_WRITEMASK_XYZW);
850 }
851
852 static void
853 ttn_arl(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
854 {
855 ttn_move_dest(b, dest, nir_f2i(b, nir_ffloor(b, src[0])));
856 }
857
858 /* EXP - Approximate Exponential Base 2
859 * dst.x = 2^{\lfloor src.x\rfloor}
860 * dst.y = src.x - \lfloor src.x\rfloor
861 * dst.z = 2^{src.x}
862 * dst.w = 1.0
863 */
864 static void
865 ttn_exp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
866 {
867 nir_ssa_def *srcx = ttn_channel(b, src[0], X);
868
869 ttn_move_dest_masked(b, dest, nir_fexp2(b, nir_ffloor(b, srcx)),
870 TGSI_WRITEMASK_X);
871 ttn_move_dest_masked(b, dest, nir_fsub(b, srcx, nir_ffloor(b, srcx)),
872 TGSI_WRITEMASK_Y);
873 ttn_move_dest_masked(b, dest, nir_fexp2(b, srcx), TGSI_WRITEMASK_Z);
874 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
875 }
876
877 /* LOG - Approximate Logarithm Base 2
878 * dst.x = \lfloor\log_2{|src.x|}\rfloor
879 * dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
880 * dst.z = \log_2{|src.x|}
881 * dst.w = 1.0
882 */
883 static void
884 ttn_log(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
885 {
886 nir_ssa_def *abs_srcx = nir_fabs(b, ttn_channel(b, src[0], X));
887 nir_ssa_def *log2 = nir_flog2(b, abs_srcx);
888
889 ttn_move_dest_masked(b, dest, nir_ffloor(b, log2), TGSI_WRITEMASK_X);
890 ttn_move_dest_masked(b, dest,
891 nir_fdiv(b, abs_srcx, nir_fexp2(b, nir_ffloor(b, log2))),
892 TGSI_WRITEMASK_Y);
893 ttn_move_dest_masked(b, dest, nir_flog2(b, abs_srcx), TGSI_WRITEMASK_Z);
894 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
895 }
896
897 /* DST - Distance Vector
898 * dst.x = 1.0
899 * dst.y = src0.y \times src1.y
900 * dst.z = src0.z
901 * dst.w = src1.w
902 */
903 static void
904 ttn_dst(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
905 {
906 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_X);
907 ttn_move_dest_masked(b, dest, nir_fmul(b, src[0], src[1]), TGSI_WRITEMASK_Y);
908 ttn_move_dest_masked(b, dest, nir_fmov(b, src[0]), TGSI_WRITEMASK_Z);
909 ttn_move_dest_masked(b, dest, nir_fmov(b, src[1]), TGSI_WRITEMASK_W);
910 }
911
912 /* LIT - Light Coefficients
913 * dst.x = 1.0
914 * dst.y = max(src.x, 0.0)
915 * dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0)} : 0
916 * dst.w = 1.0
917 */
918 static void
919 ttn_lit(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
920 {
921 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_XW);
922
923 ttn_move_dest_masked(b, dest, nir_fmax(b, ttn_channel(b, src[0], X),
924 nir_imm_float(b, 0.0)), TGSI_WRITEMASK_Y);
925
926 if (dest.write_mask & TGSI_WRITEMASK_Z) {
927 nir_ssa_def *src0_y = ttn_channel(b, src[0], Y);
928 nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, ttn_channel(b, src[0], W),
929 nir_imm_float(b, 128.0)),
930 nir_imm_float(b, -128.0));
931 nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
932 wclamp);
933
934 ttn_move_dest_masked(b, dest,
935 nir_bcsel(b,
936 nir_fge(b,
937 nir_imm_float(b, 0.0),
938 ttn_channel(b, src[0], X)),
939 nir_imm_float(b, 0.0),
940 pow),
941 TGSI_WRITEMASK_Z);
942 }
943 }
944
945 /* SCS - Sine Cosine
946 * dst.x = \cos{src.x}
947 * dst.y = \sin{src.x}
948 * dst.z = 0.0
949 * dst.w = 1.0
950 */
951 static void
952 ttn_scs(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
953 {
954 ttn_move_dest_masked(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)),
955 TGSI_WRITEMASK_X);
956 ttn_move_dest_masked(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)),
957 TGSI_WRITEMASK_Y);
958 ttn_move_dest_masked(b, dest, nir_imm_float(b, 0.0), TGSI_WRITEMASK_Z);
959 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
960 }
961
962 static void
963 ttn_sle(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
964 {
965 ttn_move_dest(b, dest, nir_sge(b, src[1], src[0]));
966 }
967
968 static void
969 ttn_sgt(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
970 {
971 ttn_move_dest(b, dest, nir_slt(b, src[1], src[0]));
972 }
973
974 static void
975 ttn_clamp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
976 {
977 ttn_move_dest(b, dest, nir_fmin(b, nir_fmax(b, src[0], src[1]), src[2]));
978 }
979
980 static void
981 ttn_xpd(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
982 {
983 ttn_move_dest_masked(b, dest,
984 nir_fsub(b,
985 nir_fmul(b,
986 ttn_swizzle(b, src[0], Y, Z, X, X),
987 ttn_swizzle(b, src[1], Z, X, Y, X)),
988 nir_fmul(b,
989 ttn_swizzle(b, src[1], Y, Z, X, X),
990 ttn_swizzle(b, src[0], Z, X, Y, X))),
991 TGSI_WRITEMASK_XYZ);
992 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
993 }
994
995 static void
996 ttn_dp2a(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
997 {
998 ttn_move_dest(b, dest,
999 ttn_channel(b, nir_fadd(b, nir_fdot2(b, src[0], src[1]),
1000 src[2]),
1001 X));
1002 }
1003
1004 static void
1005 ttn_dp2(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1006 {
1007 ttn_move_dest(b, dest, nir_fdot2(b, src[0], src[1]));
1008 }
1009
1010 static void
1011 ttn_dp3(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1012 {
1013 ttn_move_dest(b, dest, nir_fdot3(b, src[0], src[1]));
1014 }
1015
1016 static void
1017 ttn_dp4(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1018 {
1019 ttn_move_dest(b, dest, nir_fdot4(b, src[0], src[1]));
1020 }
1021
1022 static void
1023 ttn_dph(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1024 {
1025 ttn_move_dest(b, dest, nir_fadd(b, nir_fdot3(b, src[0], src[1]),
1026 ttn_channel(b, src[1], W)));
1027 }
1028
1029 static void
1030 ttn_umad(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1031 {
1032 ttn_move_dest(b, dest, nir_iadd(b, nir_imul(b, src[0], src[1]), src[2]));
1033 }
1034
1035 static void
1036 ttn_arr(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1037 {
1038 ttn_move_dest(b, dest, nir_ffloor(b, nir_fadd(b, src[0], nir_imm_float(b, 0.5))));
1039 }
1040
1041 static void
1042 ttn_cmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1043 {
1044 ttn_move_dest(b, dest, nir_bcsel(b,
1045 nir_flt(b, src[0], nir_imm_float(b, 0.0)),
1046 src[1], src[2]));
1047 }
1048
1049 static void
1050 ttn_ucmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1051 {
1052 ttn_move_dest(b, dest, nir_bcsel(b,
1053 nir_ine(b, src[0], nir_imm_int(b, 0)),
1054 src[1], src[2]));
1055 }
1056
1057 static void
1058 ttn_kill(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1059 {
1060 nir_intrinsic_instr *discard =
1061 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
1062 nir_builder_instr_insert(b, &discard->instr);
1063 }
1064
1065 static void
1066 ttn_kill_if(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1067 {
1068 nir_ssa_def *cmp = nir_bany_inequal4(b, nir_flt(b, src[0],
1069 nir_imm_float(b, 0.0)),
1070 nir_imm_int(b, 0));
1071 nir_intrinsic_instr *discard =
1072 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard_if);
1073 discard->src[0] = nir_src_for_ssa(cmp);
1074 nir_builder_instr_insert(b, &discard->instr);
1075 }
1076
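/* Opens an IF/UIF block.  Two cursors are pushed: first the cursor for after
 * the whole if statement (restored by ENDIF), then the cursor for the else
 * list (switched to by ELSE).  The builder cursor is left inside the then
 * list.
 */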
1077 static void
1078 ttn_if(struct ttn_compile *c, nir_ssa_def *src, bool is_uint)
1079 {
1080 nir_builder *b = &c->build;
1081
1082 src = ttn_channel(b, src, X);
1083
1084 nir_if *if_stmt = nir_if_create(b->shader);
1085 if (is_uint) {
1086 if_stmt->condition = nir_src_for_ssa(nir_ine(b, src, nir_imm_int(b, 0)));
1087 } else {
1088 if_stmt->condition = nir_src_for_ssa(nir_fne(b, src, nir_imm_int(b, 0)));
1089 }
1090 nir_builder_cf_insert(b, &if_stmt->cf_node);
1091
1092 c->if_stack[c->if_stack_pos] = nir_after_cf_node(&if_stmt->cf_node);
1093 c->if_stack_pos++;
1094
1095 b->cursor = nir_after_cf_list(&if_stmt->then_list);
1096
1097 c->if_stack[c->if_stack_pos] = nir_after_cf_list(&if_stmt->else_list);
1098 c->if_stack_pos++;
1099 }
1100
1101 static void
1102 ttn_else(struct ttn_compile *c)
1103 {
1104 nir_builder *b = &c->build;
1105
1106 b->cursor = c->if_stack[c->if_stack_pos - 1];
1107 }
1108
1109 static void
1110 ttn_endif(struct ttn_compile *c)
1111 {
1112 nir_builder *b = &c->build;
1113
1114 c->if_stack_pos -= 2;
1115 b->cursor = c->if_stack[c->if_stack_pos];
1116 }
1117
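/* Opens a loop: the cursor for the code following the loop is pushed on
 * loop_stack (popped again by ENDLOOP) and emission continues inside the
 * loop body.
 */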
1118 static void
1119 ttn_bgnloop(struct ttn_compile *c)
1120 {
1121 nir_builder *b = &c->build;
1122
1123 nir_loop *loop = nir_loop_create(b->shader);
1124 nir_builder_cf_insert(b, &loop->cf_node);
1125
1126 c->loop_stack[c->loop_stack_pos] = nir_after_cf_node(&loop->cf_node);
1127 c->loop_stack_pos++;
1128
1129 b->cursor = nir_after_cf_list(&loop->body);
1130 }
1131
1132 static void
1133 ttn_cont(nir_builder *b)
1134 {
1135 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_continue);
1136 nir_builder_instr_insert(b, &instr->instr);
1137 }
1138
1139 static void
1140 ttn_brk(nir_builder *b)
1141 {
1142 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
1143 nir_builder_instr_insert(b, &instr->instr);
1144 }
1145
1146 static void
1147 ttn_endloop(struct ttn_compile *c)
1148 {
1149 nir_builder *b = &c->build;
1150
1151 c->loop_stack_pos--;
1152 b->cursor = c->loop_stack[c->loop_stack_pos];
1153 }
1154
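/* Fills in sampler_dim / is_array / is_shadow from a TGSI texture target. */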
1155 static void
1156 setup_texture_info(nir_tex_instr *instr, unsigned texture)
1157 {
1158 switch (texture) {
1159 case TGSI_TEXTURE_BUFFER:
1160 instr->sampler_dim = GLSL_SAMPLER_DIM_BUF;
1161 break;
1162 case TGSI_TEXTURE_1D:
1163 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1164 break;
1165 case TGSI_TEXTURE_1D_ARRAY:
1166 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1167 instr->is_array = true;
1168 break;
1169 case TGSI_TEXTURE_SHADOW1D:
1170 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1171 instr->is_shadow = true;
1172 break;
1173 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1174 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1175 instr->is_shadow = true;
1176 instr->is_array = true;
1177 break;
1178 case TGSI_TEXTURE_2D:
1179 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1180 break;
1181 case TGSI_TEXTURE_2D_ARRAY:
1182 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1183 instr->is_array = true;
1184 break;
1185 case TGSI_TEXTURE_2D_MSAA:
1186 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1187 break;
1188 case TGSI_TEXTURE_2D_ARRAY_MSAA:
1189 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1190 instr->is_array = true;
1191 break;
1192 case TGSI_TEXTURE_SHADOW2D:
1193 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1194 instr->is_shadow = true;
1195 break;
1196 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1197 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1198 instr->is_shadow = true;
1199 instr->is_array = true;
1200 break;
1201 case TGSI_TEXTURE_3D:
1202 instr->sampler_dim = GLSL_SAMPLER_DIM_3D;
1203 break;
1204 case TGSI_TEXTURE_CUBE:
1205 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1206 break;
1207 case TGSI_TEXTURE_CUBE_ARRAY:
1208 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1209 instr->is_array = true;
1210 break;
1211 case TGSI_TEXTURE_SHADOWCUBE:
1212 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1213 instr->is_shadow = true;
1214 break;
1215 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
1216 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1217 instr->is_shadow = true;
1218 instr->is_array = true;
1219 break;
1220 case TGSI_TEXTURE_RECT:
1221 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1222 break;
1223 case TGSI_TEXTURE_SHADOWRECT:
1224 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1225 instr->is_shadow = true;
1226 break;
1227 default:
1228 fprintf(stderr, "Unknown TGSI texture target %d\n", texture);
1229 abort();
1230 }
1231 }
1232
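/* Translates the TGSI texture sampling opcodes into a single nir_tex_instr,
 * gathering the coordinate, projector, bias/LOD, gradients, shadow
 * comparator and texel offsets as tagged nir_tex_src entries.
 */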
1233 static void
1234 ttn_tex(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1235 {
1236 nir_builder *b = &c->build;
1237 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1238 nir_tex_instr *instr;
1239 nir_texop op;
1240 unsigned num_srcs, samp = 1, sview, i;
1241
1242 switch (tgsi_inst->Instruction.Opcode) {
1243 case TGSI_OPCODE_TEX:
1244 op = nir_texop_tex;
1245 num_srcs = 1;
1246 break;
1247 case TGSI_OPCODE_TEX2:
1248 op = nir_texop_tex;
1249 num_srcs = 1;
1250 samp = 2;
1251 break;
1252 case TGSI_OPCODE_TXP:
1253 op = nir_texop_tex;
1254 num_srcs = 2;
1255 break;
1256 case TGSI_OPCODE_TXB:
1257 op = nir_texop_txb;
1258 num_srcs = 2;
1259 break;
1260 case TGSI_OPCODE_TXB2:
1261 op = nir_texop_txb;
1262 num_srcs = 2;
1263 samp = 2;
1264 break;
1265 case TGSI_OPCODE_TXL:
1266 op = nir_texop_txl;
1267 num_srcs = 2;
1268 break;
1269 case TGSI_OPCODE_TXL2:
1270 op = nir_texop_txl;
1271 num_srcs = 2;
1272 samp = 2;
1273 break;
1274 case TGSI_OPCODE_TXF:
1275 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
1276 tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA) {
1277 op = nir_texop_txf_ms;
1278 } else {
1279 op = nir_texop_txf;
1280 }
1281 num_srcs = 2;
1282 break;
1283 case TGSI_OPCODE_TXD:
1284 op = nir_texop_txd;
1285 num_srcs = 3;
1286 samp = 3;
1287 break;
1288 case TGSI_OPCODE_LODQ:
1289 op = nir_texop_lod;
1290 num_srcs = 1;
1291 break;
1292
1293 default:
1294 fprintf(stderr, "unknown TGSI tex op %d\n", tgsi_inst->Instruction.Opcode);
1295 abort();
1296 }
1297
1298 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
1299 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
1300 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
1301 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
1302 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
1303 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
1304 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
1305 num_srcs++;
1306 }
1307
1308 num_srcs += tgsi_inst->Texture.NumOffsets;
1309
1310 instr = nir_tex_instr_create(b->shader, num_srcs);
1311 instr->op = op;
1312
1313 setup_texture_info(instr, tgsi_inst->Texture.Texture);
1314
1315 switch (instr->sampler_dim) {
1316 case GLSL_SAMPLER_DIM_1D:
1317 case GLSL_SAMPLER_DIM_BUF:
1318 instr->coord_components = 1;
1319 break;
1320 case GLSL_SAMPLER_DIM_2D:
1321 case GLSL_SAMPLER_DIM_RECT:
1322 case GLSL_SAMPLER_DIM_EXTERNAL:
1323 case GLSL_SAMPLER_DIM_MS:
1324 instr->coord_components = 2;
1325 break;
1326 case GLSL_SAMPLER_DIM_3D:
1327 case GLSL_SAMPLER_DIM_CUBE:
1328 instr->coord_components = 3;
1329 break;
1330 }
1331
1332 if (instr->is_array)
1333 instr->coord_components++;
1334
1335 assert(tgsi_inst->Src[samp].Register.File == TGSI_FILE_SAMPLER);
1336 instr->sampler_index = tgsi_inst->Src[samp].Register.Index;
1337
1338 /* TODO if we supported any opc's which take an explicit SVIEW
1339 * src, we would use that here instead. But for the "legacy"
1340 * texture opc's the SVIEW index is same as SAMP index:
1341 */
1342 sview = instr->sampler_index;
1343
1344 if (op == nir_texop_lod) {
1345 instr->dest_type = nir_type_float;
1346 } else if (sview < c->num_samp_types) {
1347 instr->dest_type = c->samp_types[sview];
1348 } else {
1349 instr->dest_type = nir_type_float;
1350 }
1351
1352 unsigned src_number = 0;
1353
1354 instr->src[src_number].src =
1355 nir_src_for_ssa(nir_swizzle(b, src[0], SWIZ(X, Y, Z, W),
1356 instr->coord_components, false));
1357 instr->src[src_number].src_type = nir_tex_src_coord;
1358 src_number++;
1359
1360 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1361 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1362 instr->src[src_number].src_type = nir_tex_src_projector;
1363 src_number++;
1364 }
1365
1366 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB) {
1367 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1368 instr->src[src_number].src_type = nir_tex_src_bias;
1369 src_number++;
1370 }
1371
1372 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
1373 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1374 instr->src[src_number].src_type = nir_tex_src_bias;
1375 src_number++;
1376 }
1377
1378 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
1379 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1380 instr->src[src_number].src_type = nir_tex_src_lod;
1381 src_number++;
1382 }
1383
1384 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
1385 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1386 instr->src[src_number].src_type = nir_tex_src_lod;
1387 src_number++;
1388 }
1389
1390 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
1391 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1392 if (op == nir_texop_txf_ms)
1393 instr->src[src_number].src_type = nir_tex_src_ms_index;
1394 else
1395 instr->src[src_number].src_type = nir_tex_src_lod;
1396 src_number++;
1397 }
1398
1399 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
1400 instr->src[src_number].src =
1401 nir_src_for_ssa(nir_swizzle(b, src[1], SWIZ(X, Y, Z, W),
1402 instr->coord_components, false));
1403 instr->src[src_number].src_type = nir_tex_src_ddx;
1404 src_number++;
1405 instr->src[src_number].src =
1406 nir_src_for_ssa(nir_swizzle(b, src[2], SWIZ(X, Y, Z, W),
1407 instr->coord_components, false));
1408 instr->src[src_number].src_type = nir_tex_src_ddy;
1409 src_number++;
1410 }
1411
1412 if (instr->is_shadow) {
1413 if (instr->coord_components == 4)
1414 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1415 else if (instr->coord_components == 3)
1416 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1417 else
1418 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], Z));
1419
1420 instr->src[src_number].src_type = nir_tex_src_comparitor;
1421 src_number++;
1422 }
1423
1424 for (i = 0; i < tgsi_inst->Texture.NumOffsets; i++) {
1425 struct tgsi_texture_offset *tex_offset = &tgsi_inst->TexOffsets[i];
1426 * since TexOffset isn't using tgsi_full_src_register we get to
1427 * do some extra gymnastics:
1428 */
1429 nir_alu_src src;
1430
1431 memset(&src, 0, sizeof(src));
1432
1433 src.src = ttn_src_for_file_and_index(c,
1434 tex_offset->File,
1435 tex_offset->Index,
1436 NULL, NULL, NULL);
1437
1438 src.swizzle[0] = tex_offset->SwizzleX;
1439 src.swizzle[1] = tex_offset->SwizzleY;
1440 src.swizzle[2] = tex_offset->SwizzleZ;
1441 src.swizzle[3] = TGSI_SWIZZLE_W;
1442
1443 instr->src[src_number].src_type = nir_tex_src_offset;
1444 instr->src[src_number].src = nir_src_for_ssa(
1445 nir_fmov_alu(b, src, nir_tex_instr_src_size(instr, src_number)));
1446 src_number++;
1447 }
1448
1449 assert(src_number == num_srcs);
1450
1451 nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
1452 nir_builder_instr_insert(b, &instr->instr);
1453
1454 /* Resolve the writemask on the texture op. */
1455 ttn_move_dest(b, dest, &instr->dest.ssa);
1456 }
1457
1458 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1459 *
1460 * dst.x = texture\_width(unit, lod)
1461 * dst.y = texture\_height(unit, lod)
1462 * dst.z = texture\_depth(unit, lod)
1463 * dst.w = texture\_levels(unit)
1464 *
1465 * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
1466 */
1467 static void
1468 ttn_txq(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1469 {
1470 nir_builder *b = &c->build;
1471 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1472 nir_tex_instr *txs, *qlv;
1473
1474 txs = nir_tex_instr_create(b->shader, 1);
1475 txs->op = nir_texop_txs;
1476 setup_texture_info(txs, tgsi_inst->Texture.Texture);
1477
1478 qlv = nir_tex_instr_create(b->shader, 0);
1479 qlv->op = nir_texop_query_levels;
1480 setup_texture_info(qlv, tgsi_inst->Texture.Texture);
1481
1482 assert(tgsi_inst->Src[1].Register.File == TGSI_FILE_SAMPLER);
1483 txs->sampler_index = tgsi_inst->Src[1].Register.Index;
1484 qlv->sampler_index = tgsi_inst->Src[1].Register.Index;
1485
1486 /* only single src, the lod: */
1487 txs->src[0].src = nir_src_for_ssa(ttn_channel(b, src[0], X));
1488 txs->src[0].src_type = nir_tex_src_lod;
1489
1490 nir_ssa_dest_init(&txs->instr, &txs->dest, 3, NULL);
1491 nir_builder_instr_insert(b, &txs->instr);
1492
1493 nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, NULL);
1494 nir_builder_instr_insert(b, &qlv->instr);
1495
1496 ttn_move_dest_masked(b, dest, &txs->dest.ssa, TGSI_WRITEMASK_XYZ);
1497 ttn_move_dest_masked(b, dest, &qlv->dest.ssa, TGSI_WRITEMASK_W);
1498 }
1499
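/* Direct TGSI opcode -> NIR ALU opcode mapping.  A zero entry means the
 * opcode is either handled by a dedicated helper in ttn_emit_instruction()
 * or not supported yet (the XXX entries).
 */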
1500 static const nir_op op_trans[TGSI_OPCODE_LAST] = {
1501 [TGSI_OPCODE_ARL] = 0,
1502 [TGSI_OPCODE_MOV] = nir_op_fmov,
1503 [TGSI_OPCODE_LIT] = 0,
1504 [TGSI_OPCODE_RCP] = nir_op_frcp,
1505 [TGSI_OPCODE_RSQ] = nir_op_frsq,
1506 [TGSI_OPCODE_EXP] = 0,
1507 [TGSI_OPCODE_LOG] = 0,
1508 [TGSI_OPCODE_MUL] = nir_op_fmul,
1509 [TGSI_OPCODE_ADD] = nir_op_fadd,
1510 [TGSI_OPCODE_DP3] = 0,
1511 [TGSI_OPCODE_DP4] = 0,
1512 [TGSI_OPCODE_DST] = 0,
1513 [TGSI_OPCODE_MIN] = nir_op_fmin,
1514 [TGSI_OPCODE_MAX] = nir_op_fmax,
1515 [TGSI_OPCODE_SLT] = nir_op_slt,
1516 [TGSI_OPCODE_SGE] = nir_op_sge,
1517 [TGSI_OPCODE_MAD] = nir_op_ffma,
1518 [TGSI_OPCODE_SUB] = nir_op_fsub,
1519 [TGSI_OPCODE_LRP] = 0,
1520 [TGSI_OPCODE_SQRT] = nir_op_fsqrt,
1521 [TGSI_OPCODE_DP2A] = 0,
1522 [TGSI_OPCODE_FRC] = nir_op_ffract,
1523 [TGSI_OPCODE_CLAMP] = 0,
1524 [TGSI_OPCODE_FLR] = nir_op_ffloor,
1525 [TGSI_OPCODE_ROUND] = nir_op_fround_even,
1526 [TGSI_OPCODE_EX2] = nir_op_fexp2,
1527 [TGSI_OPCODE_LG2] = nir_op_flog2,
1528 [TGSI_OPCODE_POW] = nir_op_fpow,
1529 [TGSI_OPCODE_XPD] = 0,
1530 [TGSI_OPCODE_ABS] = nir_op_fabs,
1531 [TGSI_OPCODE_DPH] = 0,
1532 [TGSI_OPCODE_COS] = nir_op_fcos,
1533 [TGSI_OPCODE_DDX] = nir_op_fddx,
1534 [TGSI_OPCODE_DDY] = nir_op_fddy,
1535 [TGSI_OPCODE_KILL] = 0,
1536 [TGSI_OPCODE_PK2H] = 0, /* XXX */
1537 [TGSI_OPCODE_PK2US] = 0, /* XXX */
1538 [TGSI_OPCODE_PK4B] = 0, /* XXX */
1539 [TGSI_OPCODE_PK4UB] = 0, /* XXX */
1540 [TGSI_OPCODE_SEQ] = nir_op_seq,
1541 [TGSI_OPCODE_SGT] = 0,
1542 [TGSI_OPCODE_SIN] = nir_op_fsin,
1543 [TGSI_OPCODE_SNE] = nir_op_sne,
1544 [TGSI_OPCODE_SLE] = 0,
1545 [TGSI_OPCODE_TEX] = 0,
1546 [TGSI_OPCODE_TXD] = 0,
1547 [TGSI_OPCODE_TXP] = 0,
1548 [TGSI_OPCODE_UP2H] = 0, /* XXX */
1549 [TGSI_OPCODE_UP2US] = 0, /* XXX */
1550 [TGSI_OPCODE_UP4B] = 0, /* XXX */
1551 [TGSI_OPCODE_UP4UB] = 0, /* XXX */
1552 [TGSI_OPCODE_ARR] = 0,
1553
1554 /* No function calls, yet. */
1555 [TGSI_OPCODE_CAL] = 0, /* XXX */
1556 [TGSI_OPCODE_RET] = 0, /* XXX */
1557
1558 [TGSI_OPCODE_SSG] = nir_op_fsign,
1559 [TGSI_OPCODE_CMP] = 0,
1560 [TGSI_OPCODE_SCS] = 0,
1561 [TGSI_OPCODE_TXB] = 0,
1562 [TGSI_OPCODE_DIV] = nir_op_fdiv,
1563 [TGSI_OPCODE_DP2] = 0,
1564 [TGSI_OPCODE_DP2A] = 0,
1565 [TGSI_OPCODE_TXL] = 0,
1566
1567 [TGSI_OPCODE_BRK] = 0,
1568 [TGSI_OPCODE_IF] = 0,
1569 [TGSI_OPCODE_UIF] = 0,
1570 [TGSI_OPCODE_ELSE] = 0,
1571 [TGSI_OPCODE_ENDIF] = 0,
1572
1573 [TGSI_OPCODE_DDX_FINE] = nir_op_fddx_fine,
1574 [TGSI_OPCODE_DDY_FINE] = nir_op_fddy_fine,
1575
1576 [TGSI_OPCODE_PUSHA] = 0, /* XXX */
1577 [TGSI_OPCODE_POPA] = 0, /* XXX */
1578
1579 [TGSI_OPCODE_CEIL] = nir_op_fceil,
1580 [TGSI_OPCODE_I2F] = nir_op_i2f,
1581 [TGSI_OPCODE_NOT] = nir_op_inot,
1582 [TGSI_OPCODE_TRUNC] = nir_op_ftrunc,
1583 [TGSI_OPCODE_SHL] = nir_op_ishl,
1584 [TGSI_OPCODE_AND] = nir_op_iand,
1585 [TGSI_OPCODE_OR] = nir_op_ior,
1586 [TGSI_OPCODE_MOD] = nir_op_umod,
1587 [TGSI_OPCODE_XOR] = nir_op_ixor,
1588 [TGSI_OPCODE_SAD] = 0, /* XXX */
1589 [TGSI_OPCODE_TXF] = 0,
1590 [TGSI_OPCODE_TXQ] = 0,
1591
1592 [TGSI_OPCODE_CONT] = 0,
1593
1594 [TGSI_OPCODE_EMIT] = 0, /* XXX */
1595 [TGSI_OPCODE_ENDPRIM] = 0, /* XXX */
1596
1597 [TGSI_OPCODE_BGNLOOP] = 0,
1598 [TGSI_OPCODE_BGNSUB] = 0, /* XXX: no function calls */
1599 [TGSI_OPCODE_ENDLOOP] = 0,
1600 [TGSI_OPCODE_ENDSUB] = 0, /* XXX: no function calls */
1601
1602 [TGSI_OPCODE_TXQ_LZ] = 0,
1603 [TGSI_OPCODE_NOP] = 0,
1604 [TGSI_OPCODE_FSEQ] = nir_op_feq,
1605 [TGSI_OPCODE_FSGE] = nir_op_fge,
1606 [TGSI_OPCODE_FSLT] = nir_op_flt,
1607 [TGSI_OPCODE_FSNE] = nir_op_fne,
1608
1609 /* No control flow yet */
1610 [TGSI_OPCODE_CALLNZ] = 0, /* XXX */
1611 [TGSI_OPCODE_BREAKC] = 0, /* not emitted by glsl_to_tgsi.cpp */
1612
1613 [TGSI_OPCODE_KILL_IF] = 0,
1614
1615 [TGSI_OPCODE_END] = 0,
1616
1617 [TGSI_OPCODE_F2I] = nir_op_f2i,
1618 [TGSI_OPCODE_IDIV] = nir_op_idiv,
1619 [TGSI_OPCODE_IMAX] = nir_op_imax,
1620 [TGSI_OPCODE_IMIN] = nir_op_imin,
1621 [TGSI_OPCODE_INEG] = nir_op_ineg,
1622 [TGSI_OPCODE_ISGE] = nir_op_ige,
1623 [TGSI_OPCODE_ISHR] = nir_op_ishr,
1624 [TGSI_OPCODE_ISLT] = nir_op_ilt,
1625 [TGSI_OPCODE_F2U] = nir_op_f2u,
1626 [TGSI_OPCODE_U2F] = nir_op_u2f,
1627 [TGSI_OPCODE_UADD] = nir_op_iadd,
1628 [TGSI_OPCODE_UDIV] = nir_op_udiv,
1629 [TGSI_OPCODE_UMAD] = 0,
1630 [TGSI_OPCODE_UMAX] = nir_op_umax,
1631 [TGSI_OPCODE_UMIN] = nir_op_umin,
1632 [TGSI_OPCODE_UMOD] = nir_op_umod,
1633 [TGSI_OPCODE_UMUL] = nir_op_imul,
1634 [TGSI_OPCODE_USEQ] = nir_op_ieq,
1635 [TGSI_OPCODE_USGE] = nir_op_uge,
1636 [TGSI_OPCODE_USHR] = nir_op_ushr,
1637 [TGSI_OPCODE_USLT] = nir_op_ult,
1638 [TGSI_OPCODE_USNE] = nir_op_ine,
1639
1640 [TGSI_OPCODE_SWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1641 [TGSI_OPCODE_CASE] = 0, /* not emitted by glsl_to_tgsi.cpp */
1642 [TGSI_OPCODE_DEFAULT] = 0, /* not emitted by glsl_to_tgsi.cpp */
1643 [TGSI_OPCODE_ENDSWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1644
1645 /* XXX: SAMPLE opcodes */
1646
1647 [TGSI_OPCODE_UARL] = nir_op_imov,
1648 [TGSI_OPCODE_UCMP] = 0,
1649 [TGSI_OPCODE_IABS] = nir_op_iabs,
1650 [TGSI_OPCODE_ISSG] = nir_op_isign,
1651
1652 /* XXX: atomics */
1653
1654 [TGSI_OPCODE_TEX2] = 0,
1655 [TGSI_OPCODE_TXB2] = 0,
1656 [TGSI_OPCODE_TXL2] = 0,
1657
1658 [TGSI_OPCODE_IMUL_HI] = nir_op_imul_high,
1659 [TGSI_OPCODE_UMUL_HI] = nir_op_umul_high,
1660
1661 [TGSI_OPCODE_TG4] = 0,
1662 [TGSI_OPCODE_LODQ] = 0,
1663
1664 [TGSI_OPCODE_IBFE] = nir_op_ibitfield_extract,
1665 [TGSI_OPCODE_UBFE] = nir_op_ubitfield_extract,
1666 [TGSI_OPCODE_BFI] = nir_op_bitfield_insert,
1667 [TGSI_OPCODE_BREV] = nir_op_bitfield_reverse,
1668 [TGSI_OPCODE_POPC] = nir_op_bit_count,
1669 [TGSI_OPCODE_LSB] = nir_op_find_lsb,
1670 [TGSI_OPCODE_IMSB] = nir_op_ifind_msb,
1671 [TGSI_OPCODE_UMSB] = nir_op_ufind_msb,
1672
1673 [TGSI_OPCODE_INTERP_CENTROID] = 0, /* XXX */
1674 [TGSI_OPCODE_INTERP_SAMPLE] = 0, /* XXX */
1675 [TGSI_OPCODE_INTERP_OFFSET] = 0, /* XXX */
1676 };
1677
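/* Translates one TGSI instruction token: fetch the sources, dispatch to a
 * special-case helper or the table-driven ALU path, apply saturate if
 * requested, and store array temporaries back to their variable.
 */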
1678 static void
1679 ttn_emit_instruction(struct ttn_compile *c)
1680 {
1681 nir_builder *b = &c->build;
1682 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1683 unsigned i;
1684 unsigned tgsi_op = tgsi_inst->Instruction.Opcode;
1685 struct tgsi_full_dst_register *tgsi_dst = &tgsi_inst->Dst[0];
1686
1687 if (tgsi_op == TGSI_OPCODE_END)
1688 return;
1689
1690 nir_ssa_def *src[TGSI_FULL_MAX_SRC_REGISTERS];
1691 for (i = 0; i < tgsi_inst->Instruction.NumSrcRegs; i++) {
1692 src[i] = ttn_get_src(c, &tgsi_inst->Src[i]);
1693 }
1694 nir_alu_dest dest = ttn_get_dest(c, tgsi_dst);
1695
1696 switch (tgsi_op) {
1697 case TGSI_OPCODE_RSQ:
1698 ttn_move_dest(b, dest, nir_frsq(b, ttn_channel(b, src[0], X)));
1699 break;
1700
1701 case TGSI_OPCODE_SQRT:
1702 ttn_move_dest(b, dest, nir_fsqrt(b, ttn_channel(b, src[0], X)));
1703 break;
1704
1705 case TGSI_OPCODE_RCP:
1706 ttn_move_dest(b, dest, nir_frcp(b, ttn_channel(b, src[0], X)));
1707 break;
1708
1709 case TGSI_OPCODE_EX2:
1710 ttn_move_dest(b, dest, nir_fexp2(b, ttn_channel(b, src[0], X)));
1711 break;
1712
1713 case TGSI_OPCODE_LG2:
1714 ttn_move_dest(b, dest, nir_flog2(b, ttn_channel(b, src[0], X)));
1715 break;
1716
1717 case TGSI_OPCODE_POW:
1718 ttn_move_dest(b, dest, nir_fpow(b,
1719 ttn_channel(b, src[0], X),
1720 ttn_channel(b, src[1], X)));
1721 break;
1722
1723 case TGSI_OPCODE_COS:
1724 ttn_move_dest(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)));
1725 break;
1726
1727 case TGSI_OPCODE_SIN:
1728 ttn_move_dest(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)));
1729 break;
1730
1731 case TGSI_OPCODE_ARL:
1732 ttn_arl(b, op_trans[tgsi_op], dest, src);
1733 break;
1734
1735 case TGSI_OPCODE_EXP:
1736 ttn_exp(b, op_trans[tgsi_op], dest, src);
1737 break;
1738
1739 case TGSI_OPCODE_LOG:
1740 ttn_log(b, op_trans[tgsi_op], dest, src);
1741 break;
1742
1743 case TGSI_OPCODE_DST:
1744 ttn_dst(b, op_trans[tgsi_op], dest, src);
1745 break;
1746
1747 case TGSI_OPCODE_LIT:
1748 ttn_lit(b, op_trans[tgsi_op], dest, src);
1749 break;
1750
1751 case TGSI_OPCODE_CLAMP:
1752 ttn_clamp(b, op_trans[tgsi_op], dest, src);
1753 break;
1754
1755 case TGSI_OPCODE_XPD:
1756 ttn_xpd(b, op_trans[tgsi_op], dest, src);
1757 break;
1758
1759 case TGSI_OPCODE_DP2:
1760 ttn_dp2(b, op_trans[tgsi_op], dest, src);
1761 break;
1762
1763 case TGSI_OPCODE_DP3:
1764 ttn_dp3(b, op_trans[tgsi_op], dest, src);
1765 break;
1766
1767 case TGSI_OPCODE_DP4:
1768 ttn_dp4(b, op_trans[tgsi_op], dest, src);
1769 break;
1770
1771 case TGSI_OPCODE_DP2A:
1772 ttn_dp2a(b, op_trans[tgsi_op], dest, src);
1773 break;
1774
1775 case TGSI_OPCODE_DPH:
1776 ttn_dph(b, op_trans[tgsi_op], dest, src);
1777 break;
1778
1779 case TGSI_OPCODE_UMAD:
1780 ttn_umad(b, op_trans[tgsi_op], dest, src);
1781 break;
1782
1783 case TGSI_OPCODE_LRP:
1784 ttn_move_dest(b, dest, nir_flrp(b, src[2], src[1], src[0]));
1785 break;
1786
1787 case TGSI_OPCODE_KILL:
1788 ttn_kill(b, op_trans[tgsi_op], dest, src);
1789 break;
1790
1791 case TGSI_OPCODE_ARR:
1792 ttn_arr(b, op_trans[tgsi_op], dest, src);
1793 break;
1794
1795 case TGSI_OPCODE_CMP:
1796 ttn_cmp(b, op_trans[tgsi_op], dest, src);
1797 break;
1798
1799 case TGSI_OPCODE_UCMP:
1800 ttn_ucmp(b, op_trans[tgsi_op], dest, src);
1801 break;
1802
1803 case TGSI_OPCODE_SCS:
1804 ttn_scs(b, op_trans[tgsi_op], dest, src);
1805 break;
1806
1807 case TGSI_OPCODE_SGT:
1808 ttn_sgt(b, op_trans[tgsi_op], dest, src);
1809 break;
1810
1811 case TGSI_OPCODE_SLE:
1812 ttn_sle(b, op_trans[tgsi_op], dest, src);
1813 break;
1814
1815 case TGSI_OPCODE_KILL_IF:
1816 ttn_kill_if(b, op_trans[tgsi_op], dest, src);
1817 break;
1818
1819 case TGSI_OPCODE_TEX:
1820 case TGSI_OPCODE_TXP:
1821 case TGSI_OPCODE_TXL:
1822 case TGSI_OPCODE_TXB:
1823 case TGSI_OPCODE_TXD:
1824 case TGSI_OPCODE_TEX2:
1825 case TGSI_OPCODE_TXL2:
1826 case TGSI_OPCODE_TXB2:
1827 case TGSI_OPCODE_TXQ_LZ:
1828 case TGSI_OPCODE_TXF:
1829 case TGSI_OPCODE_TG4:
1830 case TGSI_OPCODE_LODQ:
1831 ttn_tex(c, dest, src);
1832 break;
1833
1834 case TGSI_OPCODE_TXQ:
1835 ttn_txq(c, dest, src);
1836 break;
1837
1838 case TGSI_OPCODE_NOP:
1839 break;
1840
1841 case TGSI_OPCODE_IF:
1842 ttn_if(c, src[0], false);
1843 break;
1844
1845 case TGSI_OPCODE_UIF:
1846 ttn_if(c, src[0], true);
1847 break;
1848
1849 case TGSI_OPCODE_ELSE:
1850 ttn_else(c);
1851 break;
1852
1853 case TGSI_OPCODE_ENDIF:
1854 ttn_endif(c);
1855 break;
1856
1857 case TGSI_OPCODE_BGNLOOP:
1858 ttn_bgnloop(c);
1859 break;
1860
1861 case TGSI_OPCODE_BRK:
1862 ttn_brk(b);
1863 break;
1864
1865 case TGSI_OPCODE_CONT:
1866 ttn_cont(b);
1867 break;
1868
1869 case TGSI_OPCODE_ENDLOOP:
1870 ttn_endloop(c);
1871 break;
1872
1873 default:
1874 if (op_trans[tgsi_op] != 0 || tgsi_op == TGSI_OPCODE_MOV) {
1875 ttn_alu(b, op_trans[tgsi_op], dest, src);
1876 } else {
1877 fprintf(stderr, "unknown TGSI opcode: %s\n",
1878 tgsi_get_opcode_name(tgsi_op));
1879 abort();
1880 }
1881 break;
1882 }
1883
1884 if (tgsi_inst->Instruction.Saturate) {
1885 assert(!dest.dest.is_ssa);
1886 ttn_move_dest(b, dest, nir_fsat(b, ttn_src_for_dest(b, &dest)));
1887 }
1888
1889 /* if the dst has a matching var, append store_var to move
1890 * output from reg to var
1891 */
1892 nir_variable *var = ttn_get_var(c, tgsi_dst);
1893 if (var) {
1894 unsigned index = tgsi_dst->Register.Index;
1895 unsigned offset = c->temp_regs[index].offset;
1896 nir_intrinsic_instr *store =
1897 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
1898 struct tgsi_ind_register *indirect = tgsi_dst->Register.Indirect ?
1899 &tgsi_dst->Indirect : NULL;
1900
1901 store->num_components = 4;
1902 store->const_index[0] = 0xf;
1903 store->variables[0] = ttn_array_deref(c, store, var, offset, indirect);
1904 store->src[0] = nir_src_for_reg(dest.dest.reg.reg);
1905
1906 nir_builder_instr_insert(b, &store->instr);
1907 }
1908 }
1909
1910 /**
1911 * Emits a NIR store intrinsic for each TGSI_FILE_OUTPUT value, writing it to
1912 * the corresponding output variable at the end of the shader.
1913 *
1914 * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
1915 * written, because there's no output load intrinsic, which means we couldn't
1916 * handle writemasks.
1917 */
1918 static void
1919 ttn_add_output_stores(struct ttn_compile *c)
1920 {
1921 nir_builder *b = &c->build;
1922
1923 foreach_list_typed(nir_variable, var, node, &b->shader->outputs) {
1924 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1925 unsigned i;
1926
1927 for (i = 0; i < array_len; i++) {
1928 nir_intrinsic_instr *store =
1929 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_output);
1930 unsigned loc = var->data.driver_location + i;
1931 store->num_components = 4;
1932 store->src[0].reg.reg = c->output_regs[loc].reg;
1933 store->src[0].reg.base_offset = c->output_regs[loc].offset;
1934 store->const_index[0] = loc;
1935 store->const_index[1] = 0xf; /* writemask */
1936 store->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
1937 nir_builder_instr_insert(b, &store->instr);
1938 }
1939 }
1940 }
1941
1942 static gl_shader_stage
1943 tgsi_processor_to_shader_stage(unsigned processor)
1944 {
1945 switch (processor) {
1946 case TGSI_PROCESSOR_FRAGMENT: return MESA_SHADER_FRAGMENT;
1947 case TGSI_PROCESSOR_VERTEX: return MESA_SHADER_VERTEX;
1948 case TGSI_PROCESSOR_GEOMETRY: return MESA_SHADER_GEOMETRY;
1949 case TGSI_PROCESSOR_TESS_CTRL: return MESA_SHADER_TESS_CTRL;
1950 case TGSI_PROCESSOR_TESS_EVAL: return MESA_SHADER_TESS_EVAL;
1951 case TGSI_PROCESSOR_COMPUTE: return MESA_SHADER_COMPUTE;
1952 default:
1953 unreachable("invalid TGSI processor");
1954 }
1955 }
1956
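/* Entry point: converts a TGSI token stream into a freshly built nir_shader.
 *
 * A minimal usage sketch (variable names here are illustrative only):
 *
 *    struct nir_shader *s = tgsi_to_nir(tokens, options);
 *
 * The tokens are scanned first with tgsi_scan_shader() to size the per-file
 * tables, then every declaration, immediate and instruction token is
 * translated, and finally the deferred output stores are appended.
 */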
1957 struct nir_shader *
1958 tgsi_to_nir(const void *tgsi_tokens,
1959 const nir_shader_compiler_options *options)
1960 {
1961 struct tgsi_parse_context parser;
1962 struct tgsi_shader_info scan;
1963 struct ttn_compile *c;
1964 struct nir_shader *s;
1965 int ret;
1966
1967 c = rzalloc(NULL, struct ttn_compile);
1968
1969 tgsi_scan_shader(tgsi_tokens, &scan);
1970 c->scan = &scan;
1971
1972 nir_builder_init_simple_shader(&c->build, NULL,
1973 tgsi_processor_to_shader_stage(scan.processor),
1974 options);
1975 s = c->build.shader;
1976
1977 s->num_inputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1978 s->num_uniforms = scan.const_file_max[0] + 1;
1979 s->num_outputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1980
1981 c->output_regs = rzalloc_array(c, struct ttn_reg_info,
1982 scan.file_max[TGSI_FILE_OUTPUT] + 1);
1983 c->temp_regs = rzalloc_array(c, struct ttn_reg_info,
1984 scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1985 c->imm_defs = rzalloc_array(c, nir_ssa_def *,
1986 scan.file_max[TGSI_FILE_IMMEDIATE] + 1);
1987
1988 c->num_samp_types = scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
1989 c->samp_types = rzalloc_array(c, nir_alu_type, c->num_samp_types);
1990
1991 c->if_stack = rzalloc_array(c, nir_cursor,
1992 (scan.opcode_count[TGSI_OPCODE_IF] +
1993 scan.opcode_count[TGSI_OPCODE_UIF]) * 2);
1994 c->loop_stack = rzalloc_array(c, nir_cursor,
1995 scan.opcode_count[TGSI_OPCODE_BGNLOOP]);
1996
1997 ret = tgsi_parse_init(&parser, tgsi_tokens);
1998 assert(ret == TGSI_PARSE_OK);
1999
2000 while (!tgsi_parse_end_of_tokens(&parser)) {
2001 tgsi_parse_token(&parser);
2002 c->token = &parser.FullToken;
2003
2004 switch (parser.FullToken.Token.Type) {
2005 case TGSI_TOKEN_TYPE_DECLARATION:
2006 ttn_emit_declaration(c);
2007 break;
2008
2009 case TGSI_TOKEN_TYPE_INSTRUCTION:
2010 ttn_emit_instruction(c);
2011 break;
2012
2013 case TGSI_TOKEN_TYPE_IMMEDIATE:
2014 ttn_emit_immediate(c);
2015 break;
2016 }
2017 }
2018
2019 tgsi_parse_free(&parser);
2020
2021 ttn_add_output_stores(c);
2022
2023 ralloc_free(c);
2024 return s;
2025 }