gallium/ttn: add support for system values
[mesa.git] src/gallium/auxiliary/nir/tgsi_to_nir.c
1 /*
2 * Copyright © 2014-2015 Broadcom
3 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "util/ralloc.h"
26 #include "glsl/nir/nir.h"
27 #include "glsl/nir/nir_builder.h"
28 #include "glsl/list.h"
29 #include "glsl/shader_enums.h"
30
31 #include "nir/tgsi_to_nir.h"
32 #include "tgsi/tgsi_parse.h"
33 #include "tgsi/tgsi_dump.h"
34 #include "tgsi/tgsi_info.h"
35 #include "tgsi/tgsi_scan.h"
36
37 #define SWIZ(X, Y, Z, W) (unsigned[4]){ \
38 TGSI_SWIZZLE_##X, \
39 TGSI_SWIZZLE_##Y, \
40 TGSI_SWIZZLE_##Z, \
41 TGSI_SWIZZLE_##W, \
42 }
43
44 struct ttn_reg_info {
45 /** nir register containing this TGSI index. */
46 nir_register *reg;
47 nir_variable *var;
48 /** Offset (in vec4s) from the start of var for this TGSI index. */
49 int offset;
50 };
51
52 struct ttn_compile {
53 union tgsi_full_token *token;
54 nir_builder build;
55 struct nir_shader *s;
56 struct tgsi_shader_info *scan;
57
58 struct ttn_reg_info *output_regs;
59 struct ttn_reg_info *temp_regs;
60 nir_ssa_def **imm_defs;
61
62 nir_register *addr_reg;
63
64 /**
65 * Stack of cf_node_lists where instructions should be pushed as we pop
66 * back out of the control flow stack.
67 *
68 * For each IF/ELSE/ENDIF block, if_stack[if_stack_pos] has where the else
69 * instructions should be placed, and if_stack[if_stack_pos - 1] has where
70 * the next instructions outside of the if/then/else block go.
71 */
72 struct exec_list **if_stack;
73 unsigned if_stack_pos;
74
75 /**
76 * Stack of cf_node_lists where instructions should be pushed as we pop
77 * back out of the control flow stack.
78 *
79 * loop_stack[loop_stack_pos - 1] contains the cf_node_list for the outside
80 * of the loop.
81 */
82 struct exec_list **loop_stack;
83 unsigned loop_stack_pos;
84
85 /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
86 unsigned next_imm;
87 };
88
89 #define ttn_swizzle(b, src, x, y, z, w) \
90 nir_swizzle(b, src, SWIZ(x, y, z, w), 4, false)
91 #define ttn_channel(b, src, swiz) \
92 nir_swizzle(b, src, SWIZ(swiz, swiz, swiz, swiz), 1, false)
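/* Convenience wrappers around nir_swizzle(): for example, ttn_channel(b, src, W)
 * yields a single-component value holding src.w, while
 * ttn_swizzle(b, src, W, Z, Y, X) yields a vec4 with the components reversed.
 */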
93
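/* Builds an ALU source that reads back whatever was just written through
 * @dest (an SSA def or a register), so that ttn_emit_instruction() can
 * re-read an instruction's result, e.g. to apply saturate.
 */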
94 static nir_ssa_def *
95 ttn_src_for_dest(nir_builder *b, nir_alu_dest *dest)
96 {
97 nir_alu_src src;
98 memset(&src, 0, sizeof(src));
99
100 if (dest->dest.is_ssa)
101 src.src = nir_src_for_ssa(&dest->dest.ssa);
102 else {
103 assert(!dest->dest.reg.indirect);
104 src.src = nir_src_for_reg(dest->dest.reg.reg);
105 src.src.reg.base_offset = dest->dest.reg.base_offset;
106 }
107
108 for (int i = 0; i < 4; i++)
109 src.swizzle[i] = i;
110
111 return nir_fmov_alu(b, src, 4);
112 }
113
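/* Translates a TGSI declaration token: temporaries become local registers
 * (or a vec4-array variable when declared as an array), the address register
 * becomes a local register, and inputs/outputs/constants become
 * nir_variables.  Samplers and system values need no bookkeeping here.
 */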
114 static void
115 ttn_emit_declaration(struct ttn_compile *c)
116 {
117 nir_builder *b = &c->build;
118 struct tgsi_full_declaration *decl = &c->token->FullDeclaration;
119 unsigned array_size = decl->Range.Last - decl->Range.First + 1;
120 unsigned file = decl->Declaration.File;
121 unsigned i;
122
123 if (file == TGSI_FILE_TEMPORARY) {
124 if (decl->Declaration.Array) {
125 /* for arrays, we create variables instead of registers: */
126 nir_variable *var = rzalloc(b->shader, nir_variable);
127
128 var->type = glsl_array_type(glsl_vec4_type(), array_size);
129 var->data.mode = nir_var_global;
130 var->name = ralloc_asprintf(var, "arr_%d", decl->Array.ArrayID);
131
132 exec_list_push_tail(&b->shader->globals, &var->node);
133
134 for (i = 0; i < array_size; i++) {
135 /* point all the matching slots to the same var,
136 * with appropriate offset set, mostly just so
137 * we know what to do when tgsi does a non-indirect
138 * access
139 */
140 c->temp_regs[decl->Range.First + i].reg = NULL;
141 c->temp_regs[decl->Range.First + i].var = var;
142 c->temp_regs[decl->Range.First + i].offset = i;
143 }
144 } else {
145 for (i = 0; i < array_size; i++) {
146 nir_register *reg = nir_local_reg_create(b->impl);
147 reg->num_components = 4;
148 c->temp_regs[decl->Range.First + i].reg = reg;
149 c->temp_regs[decl->Range.First + i].var = NULL;
150 c->temp_regs[decl->Range.First + i].offset = 0;
151 }
152 }
153 } else if (file == TGSI_FILE_ADDRESS) {
154 c->addr_reg = nir_local_reg_create(b->impl);
155 c->addr_reg->num_components = 4;
156 } else if (file == TGSI_FILE_SYSTEM_VALUE) {
157 /* Nothing to record for system values. */
158 } else if (file == TGSI_FILE_SAMPLER) {
159 /* Nothing to record for samplers. */
160 } else {
161 nir_variable *var;
162 assert(file == TGSI_FILE_INPUT ||
163 file == TGSI_FILE_OUTPUT ||
164 file == TGSI_FILE_CONSTANT);
165
166 var = rzalloc(b->shader, nir_variable);
167 var->data.driver_location = decl->Range.First;
168
169 var->type = glsl_vec4_type();
170 if (array_size > 1)
171 var->type = glsl_array_type(var->type, array_size);
172
173 switch (file) {
174 case TGSI_FILE_INPUT:
175 var->data.read_only = true;
176 var->data.mode = nir_var_shader_in;
177 var->name = ralloc_asprintf(var, "in_%d", decl->Range.First);
178
179 /* We should probably translate to a VERT_ATTRIB_* or VARYING_SLOT_*
180 * instead, but nothing in NIR core is looking at the value
181 * currently, and this requires fewer changes in drivers.
182 */
183 var->data.location = decl->Semantic.Name;
184 var->data.index = decl->Semantic.Index;
185
186 /* We definitely need to translate the interpolation field, because
187 * nir_print will decode it.
188 */
189 switch (decl->Interp.Interpolate) {
190 case TGSI_INTERPOLATE_CONSTANT:
191 var->data.interpolation = INTERP_QUALIFIER_FLAT;
192 break;
193 case TGSI_INTERPOLATE_LINEAR:
194 var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
195 break;
196 case TGSI_INTERPOLATE_PERSPECTIVE:
197 var->data.interpolation = INTERP_QUALIFIER_SMOOTH;
198 break;
199 }
200
201 exec_list_push_tail(&b->shader->inputs, &var->node);
202 break;
203 case TGSI_FILE_OUTPUT: {
204 /* Since we can't load from outputs in the IR, we make temporaries
205 * for the outputs and emit stores to the real outputs at the end of
206 * the shader.
207 */
208 nir_register *reg = nir_local_reg_create(b->impl);
209 reg->num_components = 4;
210 if (array_size > 1)
211 reg->num_array_elems = array_size;
212
213 var->data.mode = nir_var_shader_out;
214 var->name = ralloc_asprintf(var, "out_%d", decl->Range.First);
215
216 var->data.location = decl->Semantic.Name;
217 var->data.index = decl->Semantic.Index;
218
219 for (i = 0; i < array_size; i++) {
220 c->output_regs[decl->Range.First + i].offset = i;
221 c->output_regs[decl->Range.First + i].reg = reg;
222 }
223
224 exec_list_push_tail(&b->shader->outputs, &var->node);
225 }
226 break;
227 case TGSI_FILE_CONSTANT:
228 var->data.mode = nir_var_uniform;
229 var->name = ralloc_asprintf(var, "uniform_%d", decl->Range.First);
230
231 exec_list_push_tail(&b->shader->uniforms, &var->node);
232 break;
233 default:
234 unreachable("bad declaration file");
235 return;
236 }
237
238 }
239 }
240
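/* Translates a TGSI immediate into a load_const instruction and records its
 * SSA def in c->imm_defs so later source operands can refer to it by index.
 */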
241 static void
242 ttn_emit_immediate(struct ttn_compile *c)
243 {
244 nir_builder *b = &c->build;
245 struct tgsi_full_immediate *tgsi_imm = &c->token->FullImmediate;
246 nir_load_const_instr *load_const;
247 int i;
248
249 load_const = nir_load_const_instr_create(b->shader, 4);
250 c->imm_defs[c->next_imm] = &load_const->def;
251 c->next_imm++;
252
253 for (i = 0; i < 4; i++)
254 load_const->value.u[i] = tgsi_imm->u[i].Uint;
255
256 nir_instr_insert_after_cf_list(b->cf_node_list, &load_const->instr);
257 }
258
259 static nir_src *
260 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect);
261
262 /* generate either a constant or indirect deref chain for accessing an
263 * array variable.
264 */
265 static nir_deref_var *
266 ttn_array_deref(struct ttn_compile *c, nir_intrinsic_instr *instr,
267 nir_variable *var, unsigned offset,
268 struct tgsi_ind_register *indirect)
269 {
270 nir_deref_var *deref = nir_deref_var_create(instr, var);
271 nir_deref_array *arr = nir_deref_array_create(deref);
272
273 arr->base_offset = offset;
274 arr->deref.type = glsl_get_array_element(var->type);
275
276 if (indirect) {
277 arr->deref_array_type = nir_deref_array_type_indirect;
278 arr->indirect = *ttn_src_for_indirect(c, indirect);
279 } else {
280 arr->deref_array_type = nir_deref_array_type_direct;
281 }
282
283 deref->deref.child = &arr->deref;
284
285 return deref;
286 }
287
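/* Returns a nir_src reading the given TGSI register file and index: plain
 * registers and immediates are referenced directly, while array temporaries,
 * inputs, constants, and system values get a load_var, load_input,
 * load_uniform, or system-value intrinsic emitted here.
 */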
288 static nir_src
289 ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
290 struct tgsi_ind_register *indirect)
291 {
292 nir_builder *b = &c->build;
293 nir_src src;
294
295 memset(&src, 0, sizeof(src));
296
297 switch (file) {
298 case TGSI_FILE_TEMPORARY:
299 if (c->temp_regs[index].var) {
300 unsigned offset = c->temp_regs[index].offset;
301 nir_variable *var = c->temp_regs[index].var;
302 nir_intrinsic_instr *load;
303
304 load = nir_intrinsic_instr_create(b->shader,
305 nir_intrinsic_load_var);
306 load->num_components = 4;
307 load->variables[0] = ttn_array_deref(c, load, var, offset, indirect);
308
309 nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
310 nir_instr_insert_after_cf_list(b->cf_node_list, &load->instr);
311
312 src = nir_src_for_ssa(&load->dest.ssa);
313
314 } else {
315 assert(!indirect);
316 src.reg.reg = c->temp_regs[index].reg;
317 }
318 break;
319
320 case TGSI_FILE_ADDRESS:
321 src.reg.reg = c->addr_reg;
322 break;
323
324 case TGSI_FILE_IMMEDIATE:
325 src = nir_src_for_ssa(c->imm_defs[index]);
326 assert(!indirect);
327 break;
328
329 case TGSI_FILE_SYSTEM_VALUE: {
330 nir_intrinsic_instr *load;
331 nir_intrinsic_op op;
332 unsigned ncomp = 1;
333
334 switch (c->scan->system_value_semantic_name[index]) {
335 case TGSI_SEMANTIC_VERTEXID_NOBASE:
336 op = nir_intrinsic_load_vertex_id_zero_base;
337 break;
338 case TGSI_SEMANTIC_VERTEXID:
339 op = nir_intrinsic_load_vertex_id;
340 break;
341 case TGSI_SEMANTIC_BASEVERTEX:
342 op = nir_intrinsic_load_base_vertex;
343 break;
344 case TGSI_SEMANTIC_INSTANCEID:
345 op = nir_intrinsic_load_instance_id;
346 break;
347 default:
348 unreachable("bad system value");
349 }
350
351 load = nir_intrinsic_instr_create(b->shader, op);
352 load->num_components = ncomp;
353
354 nir_ssa_dest_init(&load->instr, &load->dest, ncomp, NULL);
355 nir_instr_insert_after_cf_list(b->cf_node_list, &load->instr);
356
357 src = nir_src_for_ssa(&load->dest.ssa);
358 break;
359 }
360
361 case TGSI_FILE_INPUT:
362 case TGSI_FILE_CONSTANT: {
363 nir_intrinsic_instr *load;
364
365 switch (file) {
366 case TGSI_FILE_INPUT:
367 load = nir_intrinsic_instr_create(b->shader,
368 indirect ?
369 nir_intrinsic_load_input_indirect :
370 nir_intrinsic_load_input);
371 break;
372 case TGSI_FILE_CONSTANT:
373 load = nir_intrinsic_instr_create(b->shader,
374 indirect ?
375 nir_intrinsic_load_uniform_indirect :
376 nir_intrinsic_load_uniform);
377 break;
378 default:
379 unreachable("No other load files supported");
380 break;
381 }
382
383 load->num_components = 4;
384 load->const_index[0] = index;
385 load->const_index[1] = 1;
386 if (indirect) {
387 nir_alu_src indirect_address;
388 memset(&indirect_address, 0, sizeof(indirect_address));
389 indirect_address.src = nir_src_for_reg(c->addr_reg);
390 for (int i = 0; i < 4; i++)
391 indirect_address.swizzle[i] = indirect->Swizzle;
392 load->src[0] = nir_src_for_ssa(nir_imov_alu(b, indirect_address, 1));
393 }
394 nir_ssa_dest_init(&load->instr, &load->dest, 4, NULL);
395 nir_instr_insert_after_cf_list(b->cf_node_list, &load->instr);
396
397 src = nir_src_for_ssa(&load->dest.ssa);
398 break;
399 }
400
401 default:
402 unreachable("bad src file");
403 }
404
405
406 return src;
407 }
408
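/* Builds the single-component value used for TGSI indirect addressing by
 * reading the selected swizzle channel of the indirect operand's register.
 */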
409 static nir_src *
410 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect)
411 {
412 nir_builder *b = &c->build;
413 nir_alu_src src;
414 memset(&src, 0, sizeof(src));
415 for (int i = 0; i < 4; i++)
416 src.swizzle[i] = indirect->Swizzle;
417 src.src = ttn_src_for_file_and_index(c,
418 indirect->File,
419 indirect->Index, NULL);
420 nir_src *result = ralloc(b->shader, nir_src);
421 *result = nir_src_for_ssa(nir_imov_alu(b, src, 1));
422 return result;
423 }
424
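/* Translates a TGSI destination operand into a nir_alu_dest.  Array
 * temporaries get a fresh one-shot register here, preloaded with the current
 * array contents so that unwritten components survive; ttn_emit_instruction()
 * stores that register back into the array variable afterwards.
 */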
425 static nir_alu_dest
426 ttn_get_dest(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
427 {
428 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
429 nir_alu_dest dest;
430 unsigned index = tgsi_dst->Index;
431
432 memset(&dest, 0, sizeof(dest));
433
434 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
435 if (c->temp_regs[index].var) {
436 nir_builder *b = &c->build;
437 nir_intrinsic_instr *load;
438 struct tgsi_ind_register *indirect =
439 tgsi_dst->Indirect ? &tgsi_fdst->Indirect : NULL;
440 nir_register *reg;
441
442 /* this works, because TGSI will give us a base offset
443 * (in case of indirect index) that points back into
444 * the array.  Access can be direct or indirect; we
445 * don't really care. Just create a one-shot dst reg
446 * that will get store_var'd back into the array var
447 * at the end of ttn_emit_instruction()
448 */
449 reg = nir_local_reg_create(c->build.impl);
450 reg->num_components = 4;
451 dest.dest.reg.reg = reg;
452 dest.dest.reg.base_offset = 0;
453
454 /* since the alu op might not write to all components
455 * of the temporary, we must first do a load_var to
456 * get the previous value of the array element into the register.
457 * This is one area that NIR could use a bit of
458 * improvement (or an opt pass to clean up the mess
459 * once things are scalarized)
460 */
461
462 load = nir_intrinsic_instr_create(c->build.shader,
463 nir_intrinsic_load_var);
464 load->num_components = 4;
465 load->variables[0] =
466 ttn_array_deref(c, load, c->temp_regs[index].var,
467 c->temp_regs[index].offset,
468 indirect);
469
470 load->dest = nir_dest_for_reg(reg);
471
472 nir_instr_insert_after_cf_list(b->cf_node_list, &load->instr);
473 } else {
474 assert(!tgsi_dst->Indirect);
475 dest.dest.reg.reg = c->temp_regs[index].reg;
476 dest.dest.reg.base_offset = c->temp_regs[index].offset;
477 }
478 } else if (tgsi_dst->File == TGSI_FILE_OUTPUT) {
479 dest.dest.reg.reg = c->output_regs[index].reg;
480 dest.dest.reg.base_offset = c->output_regs[index].offset;
481 } else if (tgsi_dst->File == TGSI_FILE_ADDRESS) {
482 assert(index == 0);
483 dest.dest.reg.reg = c->addr_reg;
484 }
485
486 dest.write_mask = tgsi_dst->WriteMask;
487 dest.saturate = false;
488
489 if (tgsi_dst->Indirect && (tgsi_dst->File != TGSI_FILE_TEMPORARY))
490 dest.dest.reg.indirect = ttn_src_for_indirect(c, &tgsi_fdst->Indirect);
491
492 return dest;
493 }
494
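/* Returns the array variable backing a temporary destination, or NULL when
 * the destination is not an array temporary and no store_var is needed.
 */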
495 static nir_variable *
496 ttn_get_var(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
497 {
498 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
499 unsigned index = tgsi_dst->Index;
500
501 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
502 /* we should not have an indirect when there is no var! */
503 if (!c->temp_regs[index].var)
504 assert(!tgsi_dst->Indirect);
505 return c->temp_regs[index].var;
506 }
507
508 return NULL;
509 }
510
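/* Translates a TGSI source operand into an SSA value, applying its swizzle,
 * absolute-value, and negate modifiers.  Sampler sources return NULL; the
 * texturing code looks those up by index instead.
 */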
511 static nir_ssa_def *
512 ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc)
513 {
514 nir_builder *b = &c->build;
515 struct tgsi_src_register *tgsi_src = &tgsi_fsrc->Register;
516 unsigned tgsi_opcode = c->token->FullInstruction.Instruction.Opcode;
517 unsigned tgsi_src_type = tgsi_opcode_infer_src_type(tgsi_opcode);
518 bool src_is_float = !(tgsi_src_type == TGSI_TYPE_SIGNED ||
519 tgsi_src_type == TGSI_TYPE_UNSIGNED);
520 nir_alu_src src;
521
522 memset(&src, 0, sizeof(src));
523
524 if (tgsi_src->File == TGSI_FILE_NULL) {
525 return nir_imm_float(b, 0.0);
526 } else if (tgsi_src->File == TGSI_FILE_SAMPLER) {
527 /* Only the index of the sampler gets used in texturing, and it will
528 * handle looking that up on its own instead of using the nir_alu_src.
529 */
530 assert(!tgsi_src->Indirect);
531 return NULL;
532 } else {
533 src.src = ttn_src_for_file_and_index(c,
534 tgsi_src->File,
535 tgsi_src->Index,
536 (tgsi_src->Indirect ?
537 &tgsi_fsrc->Indirect : NULL));
538 }
539
540 src.swizzle[0] = tgsi_src->SwizzleX;
541 src.swizzle[1] = tgsi_src->SwizzleY;
542 src.swizzle[2] = tgsi_src->SwizzleZ;
543 src.swizzle[3] = tgsi_src->SwizzleW;
544
545 nir_ssa_def *def = nir_fmov_alu(b, src, 4);
546
547 if (tgsi_src->Absolute) {
548 if (src_is_float)
549 def = nir_fabs(b, def);
550 else
551 def = nir_iabs(b, def);
552 }
553
554 if (tgsi_src->Negate) {
555 if (src_is_float)
556 def = nir_fneg(b, def);
557 else
558 def = nir_ineg(b, def);
559 }
560
561 return def;
562 }
563
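/* Emits a single NIR ALU instruction of the given op, wiring up
 * nir_op_infos[op].num_inputs SSA sources and the provided destination.
 */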
564 static void
565 ttn_alu(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
566 {
567 unsigned num_srcs = nir_op_infos[op].num_inputs;
568 nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
569 unsigned i;
570
571 for (i = 0; i < num_srcs; i++)
572 instr->src[i].src = nir_src_for_ssa(src[i]);
573
574 instr->dest = dest;
575 nir_instr_insert_after_cf_list(b->cf_node_list, &instr->instr);
576 }
577
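/* Emits a move of def into dest restricted to write_mask, replicating def's
 * last component into any channels it doesn't have.  Does nothing when the
 * mask has no channels in common with the destination's writemask.
 */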
578 static void
579 ttn_move_dest_masked(nir_builder *b, nir_alu_dest dest,
580 nir_ssa_def *def, unsigned write_mask)
581 {
582 if (!(dest.write_mask & write_mask))
583 return;
584
585 nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_imov);
586 mov->dest = dest;
587 mov->dest.write_mask &= write_mask;
588 mov->src[0].src = nir_src_for_ssa(def);
589 for (unsigned i = def->num_components; i < 4; i++)
590 mov->src[0].swizzle[i] = def->num_components - 1;
591 nir_instr_insert_after_cf_list(b->cf_node_list, &mov->instr);
592 }
593
594 static void
595 ttn_move_dest(nir_builder *b, nir_alu_dest dest, nir_ssa_def *def)
596 {
597 ttn_move_dest_masked(b, dest, def, TGSI_WRITEMASK_XYZW);
598 }
599
600 static void
601 ttn_arl(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
602 {
603 ttn_move_dest(b, dest, nir_f2i(b, nir_ffloor(b, src[0])));
604 }
605
606 /* EXP - Approximate Exponential Base 2
607 * dst.x = 2^{\lfloor src.x\rfloor}
608 * dst.y = src.x - \lfloor src.x\rfloor
609 * dst.z = 2^{src.x}
610 * dst.w = 1.0
611 */
612 static void
613 ttn_exp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
614 {
615 nir_ssa_def *srcx = ttn_channel(b, src[0], X);
616
617 ttn_move_dest_masked(b, dest, nir_fexp2(b, nir_ffloor(b, srcx)),
618 TGSI_WRITEMASK_X);
619 ttn_move_dest_masked(b, dest, nir_fsub(b, srcx, nir_ffloor(b, srcx)),
620 TGSI_WRITEMASK_Y);
621 ttn_move_dest_masked(b, dest, nir_fexp2(b, srcx), TGSI_WRITEMASK_Z);
622 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
623 }
624
625 /* LOG - Approximate Logarithm Base 2
626 * dst.x = \lfloor\log_2{|src.x|}\rfloor
627 * dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
628 * dst.z = \log_2{|src.x|}
629 * dst.w = 1.0
630 */
631 static void
632 ttn_log(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
633 {
634 nir_ssa_def *abs_srcx = nir_fabs(b, ttn_channel(b, src[0], X));
635 nir_ssa_def *log2 = nir_flog2(b, abs_srcx);
636
637 ttn_move_dest_masked(b, dest, nir_ffloor(b, log2), TGSI_WRITEMASK_X);
638 ttn_move_dest_masked(b, dest,
639 nir_fdiv(b, abs_srcx, nir_fexp2(b, nir_ffloor(b, log2))),
640 TGSI_WRITEMASK_Y);
641 ttn_move_dest_masked(b, dest, nir_flog2(b, abs_srcx), TGSI_WRITEMASK_Z);
642 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
643 }
644
645 /* DST - Distance Vector
646 * dst.x = 1.0
647 * dst.y = src0.y \times src1.y
648 * dst.z = src0.z
649 * dst.w = src1.w
650 */
651 static void
652 ttn_dst(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
653 {
654 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_X);
655 ttn_move_dest_masked(b, dest, nir_fmul(b, src[0], src[1]), TGSI_WRITEMASK_Y);
656 ttn_move_dest_masked(b, dest, nir_fmov(b, src[0]), TGSI_WRITEMASK_Z);
657 ttn_move_dest_masked(b, dest, nir_fmov(b, src[1]), TGSI_WRITEMASK_W);
658 }
659
660 /* LIT - Light Coefficients
661 * dst.x = 1.0
662 * dst.y = max(src.x, 0.0)
663 * dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0)} : 0.0
664 * dst.w = 1.0
665 */
666 static void
667 ttn_lit(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
668 {
669 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_XW);
670
671 ttn_move_dest_masked(b, dest, nir_fmax(b, ttn_channel(b, src[0], X),
672 nir_imm_float(b, 0.0)), TGSI_WRITEMASK_Y);
673
674 if (dest.write_mask & TGSI_WRITEMASK_Z) {
675 nir_ssa_def *src0_y = ttn_channel(b, src[0], Y);
676 nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, ttn_channel(b, src[0], W),
677 nir_imm_float(b, 128.0)),
678 nir_imm_float(b, -128.0));
679 nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
680 wclamp);
681
682 ttn_move_dest_masked(b, dest,
683 nir_bcsel(b,
684 nir_fge(b,
685 nir_imm_float(b, 0.0),
686 ttn_channel(b, src[0], X)),
687 nir_imm_float(b, 0.0),
688 pow),
689 TGSI_WRITEMASK_Z);
690 }
691 }
692
693 /* SCS - Sine Cosine
694 * dst.x = \cos{src.x}
695 * dst.y = \sin{src.x}
696 * dst.z = 0.0
697 * dst.w = 1.0
698 */
699 static void
700 ttn_scs(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
701 {
702 ttn_move_dest_masked(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)),
703 TGSI_WRITEMASK_X);
704 ttn_move_dest_masked(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)),
705 TGSI_WRITEMASK_Y);
706 ttn_move_dest_masked(b, dest, nir_imm_float(b, 0.0), TGSI_WRITEMASK_Z);
707 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
708 }
709
710 static void
711 ttn_sle(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
712 {
713 ttn_move_dest(b, dest, nir_sge(b, src[1], src[0]));
714 }
715
716 static void
717 ttn_sgt(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
718 {
719 ttn_move_dest(b, dest, nir_slt(b, src[1], src[0]));
720 }
721
722 static void
723 ttn_clamp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
724 {
725 ttn_move_dest(b, dest, nir_fmin(b, nir_fmax(b, src[0], src[1]), src[2]));
726 }
727
728 static void
729 ttn_xpd(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
730 {
731 ttn_move_dest_masked(b, dest,
732 nir_fsub(b,
733 nir_fmul(b,
734 ttn_swizzle(b, src[0], Y, Z, X, X),
735 ttn_swizzle(b, src[1], Z, X, Y, X)),
736 nir_fmul(b,
737 ttn_swizzle(b, src[1], Y, Z, X, X),
738 ttn_swizzle(b, src[0], Z, X, Y, X))),
739 TGSI_WRITEMASK_XYZ);
740 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
741 }
742
743 static void
744 ttn_dp2a(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
745 {
746 ttn_move_dest(b, dest,
747 ttn_channel(b, nir_fadd(b, nir_fdot2(b, src[0], src[1]),
748 src[2]),
749 X));
750 }
751
752 static void
753 ttn_dp2(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
754 {
755 ttn_move_dest(b, dest, nir_fdot2(b, src[0], src[1]));
756 }
757
758 static void
759 ttn_dp3(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
760 {
761 ttn_move_dest(b, dest, nir_fdot3(b, src[0], src[1]));
762 }
763
764 static void
765 ttn_dp4(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
766 {
767 ttn_move_dest(b, dest, nir_fdot4(b, src[0], src[1]));
768 }
769
770 static void
771 ttn_dph(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
772 {
773 ttn_move_dest(b, dest, nir_fadd(b, nir_fdot3(b, src[0], src[1]),
774 ttn_channel(b, src[1], W)));
775 }
776
777 static void
778 ttn_umad(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
779 {
780 ttn_move_dest(b, dest, nir_iadd(b, nir_imul(b, src[0], src[1]), src[2]));
781 }
782
783 static void
784 ttn_arr(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
785 {
786 ttn_move_dest(b, dest, nir_ffloor(b, nir_fadd(b, src[0], nir_imm_float(b, 0.5))));
787 }
788
789 static void
790 ttn_cmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
791 {
792 ttn_move_dest(b, dest, nir_bcsel(b,
793 nir_flt(b, src[0], nir_imm_float(b, 0.0)),
794 src[1], src[2]));
795 }
796
797 static void
798 ttn_ucmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
799 {
800 ttn_move_dest(b, dest, nir_bcsel(b,
801 nir_ine(b, src[0], nir_imm_int(b, 0)),
802 src[1], src[2]));
803 }
804
805 static void
806 ttn_kill(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
807 {
808 nir_intrinsic_instr *discard =
809 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
810 nir_instr_insert_after_cf_list(b->cf_node_list, &discard->instr);
811 }
812
813 static void
814 ttn_kill_if(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
815 {
816 nir_ssa_def *cmp = nir_bany4(b, nir_flt(b, src[0], nir_imm_float(b, 0.0)));
817 nir_intrinsic_instr *discard =
818 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard_if);
819 discard->src[0] = nir_src_for_ssa(cmp);
820 nir_instr_insert_after_cf_list(b->cf_node_list, &discard->instr);
821 }
822
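/* Opens a TGSI IF/UIF: pushes the surrounding list and the new nir_if's
 * else-branch list onto if_stack, then points the builder at the then-branch.
 * The condition tests src.x against zero, as a float (IF) or an integer (UIF)
 * comparison.
 */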
823 static void
824 ttn_if(struct ttn_compile *c, nir_ssa_def *src, bool is_uint)
825 {
826 nir_builder *b = &c->build;
827
828 /* Save the outside-of-the-if-statement node list. */
829 c->if_stack[c->if_stack_pos] = b->cf_node_list;
830 c->if_stack_pos++;
831
832 src = ttn_channel(b, src, X);
833
834 nir_if *if_stmt = nir_if_create(b->shader);
835 if (is_uint) {
836 if_stmt->condition = nir_src_for_ssa(nir_ine(b, src, nir_imm_int(b, 0)));
837 } else {
838 if_stmt->condition = nir_src_for_ssa(nir_fne(b, src, nir_imm_int(b, 0)));
839 }
840 nir_cf_node_insert_end(b->cf_node_list, &if_stmt->cf_node);
841
842 nir_builder_insert_after_cf_list(b, &if_stmt->then_list);
843
844 c->if_stack[c->if_stack_pos] = &if_stmt->else_list;
845 c->if_stack_pos++;
846 }
847
848 static void
849 ttn_else(struct ttn_compile *c)
850 {
851 nir_builder *b = &c->build;
852
853 nir_builder_insert_after_cf_list(b, c->if_stack[c->if_stack_pos - 1]);
854 }
855
856 static void
857 ttn_endif(struct ttn_compile *c)
858 {
859 nir_builder *b = &c->build;
860
861 c->if_stack_pos -= 2;
862 nir_builder_insert_after_cf_list(b, c->if_stack[c->if_stack_pos]);
863 }
864
865 static void
866 ttn_bgnloop(struct ttn_compile *c)
867 {
868 nir_builder *b = &c->build;
869
870 /* Save the outside-of-the-loop node list. */
871 c->loop_stack[c->loop_stack_pos] = b->cf_node_list;
872 c->loop_stack_pos++;
873
874 nir_loop *loop = nir_loop_create(b->shader);
875 nir_cf_node_insert_end(b->cf_node_list, &loop->cf_node);
876
877 nir_builder_insert_after_cf_list(b, &loop->body);
878 }
879
880 static void
881 ttn_cont(nir_builder *b)
882 {
883 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_continue);
884 nir_instr_insert_after_cf_list(b->cf_node_list, &instr->instr);
885 }
886
887 static void
888 ttn_brk(nir_builder *b)
889 {
890 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
891 nir_instr_insert_after_cf_list(b->cf_node_list, &instr->instr);
892 }
893
894 static void
895 ttn_endloop(struct ttn_compile *c)
896 {
897 nir_builder *b = &c->build;
898
899 c->loop_stack_pos--;
900 nir_builder_insert_after_cf_list(b, c->loop_stack[c->loop_stack_pos]);
901 }
902
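/* Maps a TGSI texture target onto the sampler_dim, is_array, and is_shadow
 * fields of a NIR texture instruction.
 */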
903 static void
904 setup_texture_info(nir_tex_instr *instr, unsigned texture)
905 {
906 switch (texture) {
907 case TGSI_TEXTURE_1D:
908 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
909 break;
910 case TGSI_TEXTURE_1D_ARRAY:
911 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
912 instr->is_array = true;
913 break;
914 case TGSI_TEXTURE_SHADOW1D:
915 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
916 instr->is_shadow = true;
917 break;
918 case TGSI_TEXTURE_SHADOW1D_ARRAY:
919 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
920 instr->is_shadow = true;
921 instr->is_array = true;
922 break;
923 case TGSI_TEXTURE_2D:
924 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
925 break;
926 case TGSI_TEXTURE_2D_ARRAY:
927 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
928 instr->is_array = true;
929 break;
930 case TGSI_TEXTURE_2D_MSAA:
931 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
932 break;
933 case TGSI_TEXTURE_2D_ARRAY_MSAA:
934 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
935 instr->is_array = true;
936 break;
937 case TGSI_TEXTURE_SHADOW2D:
938 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
939 instr->is_shadow = true;
940 break;
941 case TGSI_TEXTURE_SHADOW2D_ARRAY:
942 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
943 instr->is_shadow = true;
944 instr->is_array = true;
945 break;
946 case TGSI_TEXTURE_3D:
947 instr->sampler_dim = GLSL_SAMPLER_DIM_3D;
948 break;
949 case TGSI_TEXTURE_CUBE:
950 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
951 break;
952 case TGSI_TEXTURE_CUBE_ARRAY:
953 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
954 instr->is_array = true;
955 break;
956 case TGSI_TEXTURE_SHADOWCUBE:
957 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
958 instr->is_shadow = true;
959 break;
960 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
961 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
962 instr->is_shadow = true;
963 instr->is_array = true;
964 break;
965 case TGSI_TEXTURE_RECT:
966 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
967 break;
968 case TGSI_TEXTURE_SHADOWRECT:
969 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
970 instr->is_shadow = true;
971 break;
972 default:
973 fprintf(stderr, "Unknown TGSI texture target %d\n", texture);
974 abort();
975 }
976 }
977
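/* Translates the TEX-style opcodes handled below (TEX, TXP, TXB, TXL, TXF,
 * and TXD, including their shadow texture targets) into a nir_tex_instr,
 * splitting the packed TGSI coordinate into NIR's separate
 * coord/projector/bias/lod/gradient/comparison sources.
 */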
978 static void
979 ttn_tex(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
980 {
981 nir_builder *b = &c->build;
982 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
983 nir_tex_instr *instr;
984 nir_texop op;
985 unsigned num_srcs, samp = 1;
986
987 switch (tgsi_inst->Instruction.Opcode) {
988 case TGSI_OPCODE_TEX:
989 op = nir_texop_tex;
990 num_srcs = 1;
991 break;
992 case TGSI_OPCODE_TXP:
993 op = nir_texop_tex;
994 num_srcs = 2;
995 break;
996 case TGSI_OPCODE_TXB:
997 op = nir_texop_txb;
998 num_srcs = 2;
999 break;
1000 case TGSI_OPCODE_TXL:
1001 op = nir_texop_txl;
1002 num_srcs = 2;
1003 break;
1004 case TGSI_OPCODE_TXF:
1005 op = nir_texop_txf;
1006 num_srcs = 1;
1007 break;
1008 case TGSI_OPCODE_TXD:
1009 op = nir_texop_txd;
1010 num_srcs = 3;
1011 samp = 3;
1012 break;
1013
1014 default:
1015 fprintf(stderr, "unknown TGSI tex op %d\n", tgsi_inst->Instruction.Opcode);
1016 abort();
1017 }
1018
1019 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
1020 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
1021 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
1022 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
1023 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
1024 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
1025 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
1026 num_srcs++;
1027 }
1028
1029 instr = nir_tex_instr_create(b->shader, num_srcs);
1030 instr->op = op;
1031
1032 setup_texture_info(instr, tgsi_inst->Texture.Texture);
1033
1034 switch (instr->sampler_dim) {
1035 case GLSL_SAMPLER_DIM_1D:
1036 case GLSL_SAMPLER_DIM_BUF:
1037 instr->coord_components = 1;
1038 break;
1039 case GLSL_SAMPLER_DIM_2D:
1040 case GLSL_SAMPLER_DIM_RECT:
1041 case GLSL_SAMPLER_DIM_EXTERNAL:
1042 case GLSL_SAMPLER_DIM_MS:
1043 instr->coord_components = 2;
1044 break;
1045 case GLSL_SAMPLER_DIM_3D:
1046 case GLSL_SAMPLER_DIM_CUBE:
1047 instr->coord_components = 3;
1048 break;
1049 }
1050
1051 if (instr->is_array)
1052 instr->coord_components++;
1053
1054 assert(tgsi_inst->Src[samp].Register.File == TGSI_FILE_SAMPLER);
1055 instr->sampler_index = tgsi_inst->Src[samp].Register.Index;
1056
1057 unsigned src_number = 0;
1058
1059 instr->src[src_number].src =
1060 nir_src_for_ssa(nir_swizzle(b, src[0], SWIZ(X, Y, Z, W),
1061 instr->coord_components, false));
1062 instr->src[src_number].src_type = nir_tex_src_coord;
1063 src_number++;
1064
1065 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1066 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1067 instr->src[src_number].src_type = nir_tex_src_projector;
1068 src_number++;
1069 }
1070
1071 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB) {
1072 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1073 instr->src[src_number].src_type = nir_tex_src_bias;
1074 src_number++;
1075 }
1076
1077 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
1078 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1079 instr->src[src_number].src_type = nir_tex_src_lod;
1080 src_number++;
1081 }
1082
1083 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
1084 instr->src[src_number].src =
1085 nir_src_for_ssa(nir_swizzle(b, src[1], SWIZ(X, Y, Z, W),
1086 instr->coord_components, false));
1087 instr->src[src_number].src_type = nir_tex_src_ddx;
1088 src_number++;
1089 instr->src[src_number].src =
1090 nir_src_for_ssa(nir_swizzle(b, src[2], SWIZ(X, Y, Z, W),
1091 instr->coord_components, false));
1092 instr->src[src_number].src_type = nir_tex_src_ddy;
1093 src_number++;
1094 }
1095
1096 if (instr->is_shadow) {
1097 if (instr->coord_components < 3)
1098 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], Z));
1099 else
1100 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1101
1102 instr->src[src_number].src_type = nir_tex_src_comparitor;
1103 src_number++;
1104 }
1105
1106 assert(src_number == num_srcs);
1107
1108 nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
1109 nir_instr_insert_after_cf_list(b->cf_node_list, &instr->instr);
1110
1111 /* Resolve the writemask on the texture op. */
1112 ttn_move_dest(b, dest, &instr->dest.ssa);
1113 }
1114
1115 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1116 *
1117 * dst.x = texture_width(unit, lod)
1118 * dst.y = texture_height(unit, lod)
1119 * dst.z = texture_depth(unit, lod)
1120 * dst.w = texture_levels(unit)
1121 *
1122 * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
1123 */
1124 static void
1125 ttn_txq(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1126 {
1127 nir_builder *b = &c->build;
1128 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1129 nir_tex_instr *txs, *qlv;
1130
1131 txs = nir_tex_instr_create(b->shader, 1);
1132 txs->op = nir_texop_txs;
1133 setup_texture_info(txs, tgsi_inst->Texture.Texture);
1134
1135 qlv = nir_tex_instr_create(b->shader, 0);
1136 qlv->op = nir_texop_query_levels;
1137 setup_texture_info(qlv, tgsi_inst->Texture.Texture);
1138
1139 assert(tgsi_inst->Src[1].Register.File == TGSI_FILE_SAMPLER);
1140 txs->sampler_index = tgsi_inst->Src[1].Register.Index;
1141 qlv->sampler_index = tgsi_inst->Src[1].Register.Index;
1142
1143 /* only single src, the lod: */
1144 txs->src[0].src = nir_src_for_ssa(ttn_channel(b, src[0], X));
1145 txs->src[0].src_type = nir_tex_src_lod;
1146
1147 nir_ssa_dest_init(&txs->instr, &txs->dest, 3, NULL);
1148 nir_instr_insert_after_cf_list(b->cf_node_list, &txs->instr);
1149
1150 nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, NULL);
1151 nir_instr_insert_after_cf_list(b->cf_node_list, &qlv->instr);
1152
1153 ttn_move_dest_masked(b, dest, &txs->dest.ssa, TGSI_WRITEMASK_XYZ);
1154 ttn_move_dest_masked(b, dest, &qlv->dest.ssa, TGSI_WRITEMASK_W);
1155 }
1156
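/* Direct TGSI -> NIR ALU opcode mapping, used by the default case of
 * ttn_emit_instruction().  A zero entry means the opcode is either lowered
 * specially there or not supported yet.
 */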
1157 static const nir_op op_trans[TGSI_OPCODE_LAST] = {
1158 [TGSI_OPCODE_ARL] = 0,
1159 [TGSI_OPCODE_MOV] = nir_op_fmov,
1160 [TGSI_OPCODE_LIT] = 0,
1161 [TGSI_OPCODE_RCP] = nir_op_frcp,
1162 [TGSI_OPCODE_RSQ] = nir_op_frsq,
1163 [TGSI_OPCODE_EXP] = 0,
1164 [TGSI_OPCODE_LOG] = 0,
1165 [TGSI_OPCODE_MUL] = nir_op_fmul,
1166 [TGSI_OPCODE_ADD] = nir_op_fadd,
1167 [TGSI_OPCODE_DP3] = 0,
1168 [TGSI_OPCODE_DP4] = 0,
1169 [TGSI_OPCODE_DST] = 0,
1170 [TGSI_OPCODE_MIN] = nir_op_fmin,
1171 [TGSI_OPCODE_MAX] = nir_op_fmax,
1172 [TGSI_OPCODE_SLT] = nir_op_slt,
1173 [TGSI_OPCODE_SGE] = nir_op_sge,
1174 [TGSI_OPCODE_MAD] = nir_op_ffma,
1175 [TGSI_OPCODE_SUB] = nir_op_fsub,
1176 [TGSI_OPCODE_LRP] = 0,
1177 [TGSI_OPCODE_SQRT] = nir_op_fsqrt,
1178 [TGSI_OPCODE_DP2A] = 0,
1179 [TGSI_OPCODE_FRC] = nir_op_ffract,
1180 [TGSI_OPCODE_CLAMP] = 0,
1181 [TGSI_OPCODE_FLR] = nir_op_ffloor,
1182 [TGSI_OPCODE_ROUND] = nir_op_fround_even,
1183 [TGSI_OPCODE_EX2] = nir_op_fexp2,
1184 [TGSI_OPCODE_LG2] = nir_op_flog2,
1185 [TGSI_OPCODE_POW] = nir_op_fpow,
1186 [TGSI_OPCODE_XPD] = 0,
1187 [TGSI_OPCODE_ABS] = nir_op_fabs,
1188 [TGSI_OPCODE_DPH] = 0,
1189 [TGSI_OPCODE_COS] = nir_op_fcos,
1190 [TGSI_OPCODE_DDX] = nir_op_fddx,
1191 [TGSI_OPCODE_DDY] = nir_op_fddy,
1192 [TGSI_OPCODE_KILL] = 0,
1193 [TGSI_OPCODE_PK2H] = 0, /* XXX */
1194 [TGSI_OPCODE_PK2US] = 0, /* XXX */
1195 [TGSI_OPCODE_PK4B] = 0, /* XXX */
1196 [TGSI_OPCODE_PK4UB] = 0, /* XXX */
1197 [TGSI_OPCODE_SEQ] = nir_op_seq,
1198 [TGSI_OPCODE_SGT] = 0,
1199 [TGSI_OPCODE_SIN] = nir_op_fsin,
1200 [TGSI_OPCODE_SLE] = 0,
1201 [TGSI_OPCODE_TEX] = 0,
1202 [TGSI_OPCODE_TXD] = 0,
1203 [TGSI_OPCODE_TXP] = 0,
1204 [TGSI_OPCODE_UP2H] = 0, /* XXX */
1205 [TGSI_OPCODE_UP2US] = 0, /* XXX */
1206 [TGSI_OPCODE_UP4B] = 0, /* XXX */
1207 [TGSI_OPCODE_UP4UB] = 0, /* XXX */
1208 [TGSI_OPCODE_ARR] = 0,
1209
1210 /* No function calls, yet. */
1211 [TGSI_OPCODE_CAL] = 0, /* XXX */
1212 [TGSI_OPCODE_RET] = 0, /* XXX */
1213
1214 [TGSI_OPCODE_SSG] = nir_op_fsign,
1215 [TGSI_OPCODE_CMP] = 0,
1216 [TGSI_OPCODE_SCS] = 0,
1217 [TGSI_OPCODE_TXB] = 0,
1218 [TGSI_OPCODE_DIV] = nir_op_fdiv,
1219 [TGSI_OPCODE_DP2] = 0,
1220 [TGSI_OPCODE_DP2A] = 0,
1221 [TGSI_OPCODE_TXL] = 0,
1222
1223 [TGSI_OPCODE_BRK] = 0,
1224 [TGSI_OPCODE_IF] = 0,
1225 [TGSI_OPCODE_UIF] = 0,
1226 [TGSI_OPCODE_ELSE] = 0,
1227 [TGSI_OPCODE_ENDIF] = 0,
1228
1229 [TGSI_OPCODE_DDX_FINE] = nir_op_fddx_fine,
1230 [TGSI_OPCODE_DDY_FINE] = nir_op_fddy_fine,
1231
1232 [TGSI_OPCODE_PUSHA] = 0, /* XXX */
1233 [TGSI_OPCODE_POPA] = 0, /* XXX */
1234
1235 [TGSI_OPCODE_CEIL] = nir_op_fceil,
1236 [TGSI_OPCODE_I2F] = nir_op_i2f,
1237 [TGSI_OPCODE_NOT] = nir_op_inot,
1238 [TGSI_OPCODE_TRUNC] = nir_op_ftrunc,
1239 [TGSI_OPCODE_SHL] = nir_op_ishl,
1240 [TGSI_OPCODE_AND] = nir_op_iand,
1241 [TGSI_OPCODE_OR] = nir_op_ior,
1242 [TGSI_OPCODE_MOD] = nir_op_umod,
1243 [TGSI_OPCODE_XOR] = nir_op_ixor,
1244 [TGSI_OPCODE_SAD] = 0, /* XXX */
1245 [TGSI_OPCODE_TXF] = 0,
1246 [TGSI_OPCODE_TXQ] = 0,
1247
1248 [TGSI_OPCODE_CONT] = 0,
1249
1250 [TGSI_OPCODE_EMIT] = 0, /* XXX */
1251 [TGSI_OPCODE_ENDPRIM] = 0, /* XXX */
1252
1253 [TGSI_OPCODE_BGNLOOP] = 0,
1254 [TGSI_OPCODE_BGNSUB] = 0, /* XXX: no function calls */
1255 [TGSI_OPCODE_ENDLOOP] = 0,
1256 [TGSI_OPCODE_ENDSUB] = 0, /* XXX: no function calls */
1257
1258 [TGSI_OPCODE_TXQ_LZ] = 0,
1259 [TGSI_OPCODE_NOP] = 0,
1260 [TGSI_OPCODE_FSEQ] = nir_op_feq,
1261 [TGSI_OPCODE_FSGE] = nir_op_fge,
1262 [TGSI_OPCODE_FSLT] = nir_op_flt,
1263 [TGSI_OPCODE_FSNE] = nir_op_fne,
1264
1265 /* No control flow yet */
1266 [TGSI_OPCODE_CALLNZ] = 0, /* XXX */
1267 [TGSI_OPCODE_BREAKC] = 0, /* not emitted by glsl_to_tgsi.cpp */
1268
1269 [TGSI_OPCODE_KILL_IF] = 0,
1270
1271 [TGSI_OPCODE_END] = 0,
1272
1273 [TGSI_OPCODE_F2I] = nir_op_f2i,
1274 [TGSI_OPCODE_IDIV] = nir_op_idiv,
1275 [TGSI_OPCODE_IMAX] = nir_op_imax,
1276 [TGSI_OPCODE_IMIN] = nir_op_imin,
1277 [TGSI_OPCODE_INEG] = nir_op_ineg,
1278 [TGSI_OPCODE_ISGE] = nir_op_ige,
1279 [TGSI_OPCODE_ISHR] = nir_op_ishr,
1280 [TGSI_OPCODE_ISLT] = nir_op_ilt,
1281 [TGSI_OPCODE_F2U] = nir_op_f2u,
1282 [TGSI_OPCODE_U2F] = nir_op_u2f,
1283 [TGSI_OPCODE_UADD] = nir_op_iadd,
1284 [TGSI_OPCODE_UDIV] = nir_op_udiv,
1285 [TGSI_OPCODE_UMAD] = 0,
1286 [TGSI_OPCODE_UMAX] = nir_op_umax,
1287 [TGSI_OPCODE_UMIN] = nir_op_umin,
1288 [TGSI_OPCODE_UMOD] = nir_op_umod,
1289 [TGSI_OPCODE_UMUL] = nir_op_imul,
1290 [TGSI_OPCODE_USEQ] = nir_op_ieq,
1291 [TGSI_OPCODE_USGE] = nir_op_uge,
1292 [TGSI_OPCODE_USHR] = nir_op_ushr,
1293 [TGSI_OPCODE_USLT] = nir_op_ult,
1294 [TGSI_OPCODE_USNE] = nir_op_ine,
1295
1296 [TGSI_OPCODE_SWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1297 [TGSI_OPCODE_CASE] = 0, /* not emitted by glsl_to_tgsi.cpp */
1298 [TGSI_OPCODE_DEFAULT] = 0, /* not emitted by glsl_to_tgsi.cpp */
1299 [TGSI_OPCODE_ENDSWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1300
1301 /* XXX: SAMPLE opcodes */
1302
1303 [TGSI_OPCODE_UARL] = nir_op_imov,
1304 [TGSI_OPCODE_UCMP] = 0,
1305 [TGSI_OPCODE_IABS] = nir_op_iabs,
1306 [TGSI_OPCODE_ISSG] = nir_op_isign,
1307
1308 /* XXX: atomics */
1309
1310 [TGSI_OPCODE_TEX2] = 0,
1311 [TGSI_OPCODE_TXB2] = 0,
1312 [TGSI_OPCODE_TXL2] = 0,
1313
1314 [TGSI_OPCODE_IMUL_HI] = nir_op_imul_high,
1315 [TGSI_OPCODE_UMUL_HI] = nir_op_umul_high,
1316
1317 [TGSI_OPCODE_TG4] = 0,
1318 [TGSI_OPCODE_LODQ] = 0, /* XXX */
1319
1320 [TGSI_OPCODE_IBFE] = nir_op_ibitfield_extract,
1321 [TGSI_OPCODE_UBFE] = nir_op_ubitfield_extract,
1322 [TGSI_OPCODE_BFI] = nir_op_bitfield_insert,
1323 [TGSI_OPCODE_BREV] = nir_op_bitfield_reverse,
1324 [TGSI_OPCODE_POPC] = nir_op_bit_count,
1325 [TGSI_OPCODE_LSB] = nir_op_find_lsb,
1326 [TGSI_OPCODE_IMSB] = nir_op_ifind_msb,
1327 [TGSI_OPCODE_UMSB] = nir_op_ifind_msb, /* XXX: signed vs unsigned */
1328
1329 [TGSI_OPCODE_INTERP_CENTROID] = 0, /* XXX */
1330 [TGSI_OPCODE_INTERP_SAMPLE] = 0, /* XXX */
1331 [TGSI_OPCODE_INTERP_OFFSET] = 0, /* XXX */
1332 };
1333
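/* Translates one TGSI instruction: gathers its sources, emits either a
 * special lowering from the switch below or a direct op_trans[] ALU op,
 * applies saturate, and finally stores array temporaries back into their
 * variables.
 */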
1334 static void
1335 ttn_emit_instruction(struct ttn_compile *c)
1336 {
1337 nir_builder *b = &c->build;
1338 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1339 unsigned i;
1340 unsigned tgsi_op = tgsi_inst->Instruction.Opcode;
1341 struct tgsi_full_dst_register *tgsi_dst = &tgsi_inst->Dst[0];
1342
1343 if (tgsi_op == TGSI_OPCODE_END)
1344 return;
1345
1346 nir_ssa_def *src[TGSI_FULL_MAX_SRC_REGISTERS];
1347 for (i = 0; i < TGSI_FULL_MAX_SRC_REGISTERS; i++) {
1348 src[i] = ttn_get_src(c, &tgsi_inst->Src[i]);
1349 }
1350 nir_alu_dest dest = ttn_get_dest(c, tgsi_dst);
1351
1352 switch (tgsi_op) {
1353 case TGSI_OPCODE_RSQ:
1354 ttn_move_dest(b, dest, nir_frsq(b, ttn_channel(b, src[0], X)));
1355 break;
1356
1357 case TGSI_OPCODE_SQRT:
1358 ttn_move_dest(b, dest, nir_fsqrt(b, ttn_channel(b, src[0], X)));
1359 break;
1360
1361 case TGSI_OPCODE_RCP:
1362 ttn_move_dest(b, dest, nir_frcp(b, ttn_channel(b, src[0], X)));
1363 break;
1364
1365 case TGSI_OPCODE_EX2:
1366 ttn_move_dest(b, dest, nir_fexp2(b, ttn_channel(b, src[0], X)));
1367 break;
1368
1369 case TGSI_OPCODE_LG2:
1370 ttn_move_dest(b, dest, nir_flog2(b, ttn_channel(b, src[0], X)));
1371 break;
1372
1373 case TGSI_OPCODE_POW:
1374 ttn_move_dest(b, dest, nir_fpow(b,
1375 ttn_channel(b, src[0], X),
1376 ttn_channel(b, src[1], X)));
1377 break;
1378
1379 case TGSI_OPCODE_COS:
1380 ttn_move_dest(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)));
1381 break;
1382
1383 case TGSI_OPCODE_SIN:
1384 ttn_move_dest(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)));
1385 break;
1386
1387 case TGSI_OPCODE_ARL:
1388 ttn_arl(b, op_trans[tgsi_op], dest, src);
1389 break;
1390
1391 case TGSI_OPCODE_EXP:
1392 ttn_exp(b, op_trans[tgsi_op], dest, src);
1393 break;
1394
1395 case TGSI_OPCODE_LOG:
1396 ttn_log(b, op_trans[tgsi_op], dest, src);
1397 break;
1398
1399 case TGSI_OPCODE_DST:
1400 ttn_dst(b, op_trans[tgsi_op], dest, src);
1401 break;
1402
1403 case TGSI_OPCODE_LIT:
1404 ttn_lit(b, op_trans[tgsi_op], dest, src);
1405 break;
1406
1407 case TGSI_OPCODE_CLAMP:
1408 ttn_clamp(b, op_trans[tgsi_op], dest, src);
1409 break;
1410
1411 case TGSI_OPCODE_XPD:
1412 ttn_xpd(b, op_trans[tgsi_op], dest, src);
1413 break;
1414
1415 case TGSI_OPCODE_DP2:
1416 ttn_dp2(b, op_trans[tgsi_op], dest, src);
1417 break;
1418
1419 case TGSI_OPCODE_DP3:
1420 ttn_dp3(b, op_trans[tgsi_op], dest, src);
1421 break;
1422
1423 case TGSI_OPCODE_DP4:
1424 ttn_dp4(b, op_trans[tgsi_op], dest, src);
1425 break;
1426
1427 case TGSI_OPCODE_DP2A:
1428 ttn_dp2a(b, op_trans[tgsi_op], dest, src);
1429 break;
1430
1431 case TGSI_OPCODE_DPH:
1432 ttn_dph(b, op_trans[tgsi_op], dest, src);
1433 break;
1434
1435 case TGSI_OPCODE_UMAD:
1436 ttn_umad(b, op_trans[tgsi_op], dest, src);
1437 break;
1438
1439 case TGSI_OPCODE_LRP:
1440 ttn_move_dest(b, dest, nir_flrp(b, src[2], src[1], src[0]));
1441 break;
1442
1443 case TGSI_OPCODE_KILL:
1444 ttn_kill(b, op_trans[tgsi_op], dest, src);
1445 break;
1446
1447 case TGSI_OPCODE_ARR:
1448 ttn_arr(b, op_trans[tgsi_op], dest, src);
1449 break;
1450
1451 case TGSI_OPCODE_CMP:
1452 ttn_cmp(b, op_trans[tgsi_op], dest, src);
1453 break;
1454
1455 case TGSI_OPCODE_UCMP:
1456 ttn_ucmp(b, op_trans[tgsi_op], dest, src);
1457 break;
1458
1459 case TGSI_OPCODE_SCS:
1460 ttn_scs(b, op_trans[tgsi_op], dest, src);
1461 break;
1462
1463 case TGSI_OPCODE_SGT:
1464 ttn_sgt(b, op_trans[tgsi_op], dest, src);
1465 break;
1466
1467 case TGSI_OPCODE_SLE:
1468 ttn_sle(b, op_trans[tgsi_op], dest, src);
1469 break;
1470
1471 case TGSI_OPCODE_KILL_IF:
1472 ttn_kill_if(b, op_trans[tgsi_op], dest, src);
1473 break;
1474
1475 case TGSI_OPCODE_TEX:
1476 case TGSI_OPCODE_TXP:
1477 case TGSI_OPCODE_TXL:
1478 case TGSI_OPCODE_TXB:
1479 case TGSI_OPCODE_TXD:
1480 case TGSI_OPCODE_TXL2:
1481 case TGSI_OPCODE_TXB2:
1482 case TGSI_OPCODE_TXQ_LZ:
1483 case TGSI_OPCODE_TXF:
1484 case TGSI_OPCODE_TG4:
1485 ttn_tex(c, dest, src);
1486 break;
1487
1488 case TGSI_OPCODE_TXQ:
1489 ttn_txq(c, dest, src);
1490 break;
1491
1492 case TGSI_OPCODE_NOP:
1493 break;
1494
1495 case TGSI_OPCODE_IF:
1496 ttn_if(c, src[0], false);
1497 break;
1498
1499 case TGSI_OPCODE_UIF:
1500 ttn_if(c, src[0], true);
1501 break;
1502
1503 case TGSI_OPCODE_ELSE:
1504 ttn_else(c);
1505 break;
1506
1507 case TGSI_OPCODE_ENDIF:
1508 ttn_endif(c);
1509 break;
1510
1511 case TGSI_OPCODE_BGNLOOP:
1512 ttn_bgnloop(c);
1513 break;
1514
1515 case TGSI_OPCODE_BRK:
1516 ttn_brk(b);
1517 break;
1518
1519 case TGSI_OPCODE_CONT:
1520 ttn_cont(b);
1521 break;
1522
1523 case TGSI_OPCODE_ENDLOOP:
1524 ttn_endloop(c);
1525 break;
1526
1527 default:
1528 if (op_trans[tgsi_op] != 0 || tgsi_op == TGSI_OPCODE_MOV) {
1529 ttn_alu(b, op_trans[tgsi_op], dest, src);
1530 } else {
1531 fprintf(stderr, "unknown TGSI opcode: %s\n",
1532 tgsi_get_opcode_name(tgsi_op));
1533 abort();
1534 }
1535 break;
1536 }
1537
1538 if (tgsi_inst->Instruction.Saturate) {
1539 assert(tgsi_inst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);
1540 assert(!dest.dest.is_ssa);
1541 ttn_move_dest(b, dest, nir_fsat(b, ttn_src_for_dest(b, &dest)));
1542 }
1543
1544 /* if the dst has a matching var, append a store_var to move the value
1545 * from the temporary register back into the array var
1546 */
1547 nir_variable *var = ttn_get_var(c, tgsi_dst);
1548 if (var) {
1549 unsigned index = tgsi_dst->Register.Index;
1550 unsigned offset = c->temp_regs[index].offset;
1551 nir_intrinsic_instr *store =
1552 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
1553 struct tgsi_ind_register *indirect = tgsi_dst->Register.Indirect ?
1554 &tgsi_dst->Indirect : NULL;
1555
1556 store->num_components = 4;
1557 store->variables[0] = ttn_array_deref(c, store, var, offset, indirect);
1558 store->src[0] = nir_src_for_reg(dest.dest.reg.reg);
1559
1560 nir_instr_insert_after_cf_list(b->cf_node_list, &store->instr);
1561 }
1562 }
1563
1564 /**
1565 * Emits a NIR store_output intrinsic for each TGSI_FILE_OUTPUT value at the
1566 * end of the shader, copying it out of its temporary output register.
1567 *
1568 * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
1569 * written, because there's no output load intrinsic, which means we couldn't
1570 * handle writemasks.
1571 */
1572 static void
1573 ttn_add_output_stores(struct ttn_compile *c)
1574 {
1575 nir_builder *b = &c->build;
1576
1577 foreach_list_typed(nir_variable, var, node, &b->shader->outputs) {
1578 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1579 unsigned i;
1580
1581 for (i = 0; i < array_len; i++) {
1582 nir_intrinsic_instr *store =
1583 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_output);
1584 store->num_components = 4;
1585 store->const_index[0] = var->data.driver_location + i;
1586 store->const_index[1] = 1;
1587 store->src[0].reg.reg = c->output_regs[var->data.driver_location].reg;
1588 nir_instr_insert_after_cf_list(b->cf_node_list, &store->instr);
1589 }
1590 }
1591 }
1592
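/* Entry point: scans the TGSI token stream for sizing information, creates a
 * nir_shader with a single "main" function impl, and then translates each
 * declaration, immediate, and instruction token.  The caller owns the
 * returned shader.
 *
 * A minimal usage sketch (the driver-side names here are hypothetical):
 *
 *    struct nir_shader *s =
 *       tgsi_to_nir(cso->tokens, my_screen->nir_compiler_options);
 *    my_driver_compile_nir(s);
 */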
1593 struct nir_shader *
1594 tgsi_to_nir(const void *tgsi_tokens,
1595 const nir_shader_compiler_options *options)
1596 {
1597 struct tgsi_parse_context parser;
1598 struct tgsi_shader_info scan;
1599 struct ttn_compile *c;
1600 struct nir_shader *s;
1601 int ret;
1602
1603 c = rzalloc(NULL, struct ttn_compile);
1604 s = nir_shader_create(NULL, options);
1605
1606 nir_function *func = nir_function_create(s, "main");
1607 nir_function_overload *overload = nir_function_overload_create(func);
1608 nir_function_impl *impl = nir_function_impl_create(overload);
1609
1610 nir_builder_init(&c->build, impl);
1611 nir_builder_insert_after_cf_list(&c->build, &impl->body);
1612
1613 tgsi_scan_shader(tgsi_tokens, &scan);
1614 c->scan = &scan;
1615
1616 s->num_inputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1617 s->num_uniforms = scan.file_max[TGSI_FILE_CONSTANT] + 1;
1618 s->num_outputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1619
1620 c->output_regs = rzalloc_array(c, struct ttn_reg_info,
1621 scan.file_max[TGSI_FILE_OUTPUT] + 1);
1622 c->temp_regs = rzalloc_array(c, struct ttn_reg_info,
1623 scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1624 c->imm_defs = rzalloc_array(c, nir_ssa_def *,
1625 scan.file_max[TGSI_FILE_IMMEDIATE] + 1);
1626
1627 c->if_stack = rzalloc_array(c, struct exec_list *,
1628 (scan.opcode_count[TGSI_OPCODE_IF] +
1629 scan.opcode_count[TGSI_OPCODE_UIF]) * 2);
1630 c->loop_stack = rzalloc_array(c, struct exec_list *,
1631 scan.opcode_count[TGSI_OPCODE_BGNLOOP]);
1632
1633 ret = tgsi_parse_init(&parser, tgsi_tokens);
1634 assert(ret == TGSI_PARSE_OK);
1635
1636 while (!tgsi_parse_end_of_tokens(&parser)) {
1637 tgsi_parse_token(&parser);
1638 c->token = &parser.FullToken;
1639
1640 switch (parser.FullToken.Token.Type) {
1641 case TGSI_TOKEN_TYPE_DECLARATION:
1642 ttn_emit_declaration(c);
1643 break;
1644
1645 case TGSI_TOKEN_TYPE_INSTRUCTION:
1646 ttn_emit_instruction(c);
1647 break;
1648
1649 case TGSI_TOKEN_TYPE_IMMEDIATE:
1650 ttn_emit_immediate(c);
1651 break;
1652 }
1653 }
1654
1655 tgsi_parse_free(&parser);
1656
1657 ttn_add_output_stores(c);
1658
1659 ralloc_free(c);
1660 return s;
1661 }