2 * Copyright © 2014-2015 Broadcom
3 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "util/ralloc.h"
26 #include "glsl/nir/nir.h"
27 #include "glsl/nir/nir_builder.h"
28 #include "glsl/list.h"
29 #include "glsl/shader_enums.h"
31 #include "nir/tgsi_to_nir.h"
32 #include "tgsi/tgsi_parse.h"
33 #include "tgsi/tgsi_dump.h"
34 #include "tgsi/tgsi_info.h"
35 #include "tgsi/tgsi_scan.h"
/* Build an anonymous unsigned[4] swizzle array from TGSI channel names. */
#define SWIZ(X, Y, Z, W) (unsigned[4]){      \
      TGSI_SWIZZLE_##X,                      \
      TGSI_SWIZZLE_##Y,                      \
      TGSI_SWIZZLE_##Z,                      \
      TGSI_SWIZZLE_##W,                      \
   }
45 /** nir register containing this TGSI index. */
48 /** Offset (in vec4s) from the start of var for this TGSI index. */
53 union tgsi_full_token
*token
;
55 struct tgsi_shader_info
*scan
;
57 struct ttn_reg_info
*output_regs
;
58 struct ttn_reg_info
*temp_regs
;
59 nir_ssa_def
**imm_defs
;
61 nir_register
*addr_reg
;
64 * Stack of cf_node_lists where instructions should be pushed as we pop
65 * back out of the control flow stack.
67 * For each IF/ELSE/ENDIF block, if_stack[if_stack_pos] has where the else
68 * instructions should be placed, and if_stack[if_stack_pos - 1] has where
69 * the next instructions outside of the if/then/else block go.
71 struct exec_list
**if_stack
;
72 unsigned if_stack_pos
;
75 * Stack of cf_node_lists where instructions should be pushed as we pop
76 * back out of the control flow stack.
78 * loop_stack[loop_stack_pos - 1] contains the cf_node_list for the outside
81 struct exec_list
**loop_stack
;
82 unsigned loop_stack_pos
;
84 /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
/* Emit a 4-component swizzle of src using TGSI channel names (X/Y/Z/W). */
#define ttn_swizzle(b, src, x, y, z, w) \
   nir_swizzle(b, src, SWIZ(x, y, z, w), 4, false)
/* Extract one channel of src as a 1-component value. */
#define ttn_channel(b, src, swiz) \
   nir_swizzle(b, src, SWIZ(swiz, swiz, swiz, swiz), 1, false)
94 ttn_src_for_dest(nir_builder
*b
, nir_alu_dest
*dest
)
97 memset(&src
, 0, sizeof(src
));
99 if (dest
->dest
.is_ssa
)
100 src
.src
= nir_src_for_ssa(&dest
->dest
.ssa
);
102 assert(!dest
->dest
.reg
.indirect
);
103 src
.src
= nir_src_for_reg(dest
->dest
.reg
.reg
);
104 src
.src
.reg
.base_offset
= dest
->dest
.reg
.base_offset
;
107 for (int i
= 0; i
< 4; i
++)
110 return nir_fmov_alu(b
, src
, 4);
114 ttn_emit_declaration(struct ttn_compile
*c
)
116 nir_builder
*b
= &c
->build
;
117 struct tgsi_full_declaration
*decl
= &c
->token
->FullDeclaration
;
118 unsigned array_size
= decl
->Range
.Last
- decl
->Range
.First
+ 1;
119 unsigned file
= decl
->Declaration
.File
;
122 if (file
== TGSI_FILE_TEMPORARY
) {
123 if (decl
->Declaration
.Array
) {
124 /* for arrays, we create variables instead of registers: */
125 nir_variable
*var
= rzalloc(b
->shader
, nir_variable
);
127 var
->type
= glsl_array_type(glsl_vec4_type(), array_size
);
128 var
->data
.mode
= nir_var_global
;
129 var
->name
= ralloc_asprintf(var
, "arr_%d", decl
->Array
.ArrayID
);
131 exec_list_push_tail(&b
->shader
->globals
, &var
->node
);
133 for (i
= 0; i
< array_size
; i
++) {
134 /* point all the matching slots to the same var,
135 * with appropriate offset set, mostly just so
136 * we know what to do when tgsi does a non-indirect
139 c
->temp_regs
[decl
->Range
.First
+ i
].reg
= NULL
;
140 c
->temp_regs
[decl
->Range
.First
+ i
].var
= var
;
141 c
->temp_regs
[decl
->Range
.First
+ i
].offset
= i
;
144 for (i
= 0; i
< array_size
; i
++) {
145 nir_register
*reg
= nir_local_reg_create(b
->impl
);
146 reg
->num_components
= 4;
147 c
->temp_regs
[decl
->Range
.First
+ i
].reg
= reg
;
148 c
->temp_regs
[decl
->Range
.First
+ i
].var
= NULL
;
149 c
->temp_regs
[decl
->Range
.First
+ i
].offset
= 0;
152 } else if (file
== TGSI_FILE_ADDRESS
) {
153 c
->addr_reg
= nir_local_reg_create(b
->impl
);
154 c
->addr_reg
->num_components
= 4;
155 } else if (file
== TGSI_FILE_SYSTEM_VALUE
) {
156 /* Nothing to record for system values. */
157 } else if (file
== TGSI_FILE_SAMPLER
) {
158 /* Nothing to record for samplers. */
161 assert(file
== TGSI_FILE_INPUT
||
162 file
== TGSI_FILE_OUTPUT
||
163 file
== TGSI_FILE_CONSTANT
);
165 /* nothing to do for UBOs: */
166 if ((file
== TGSI_FILE_CONSTANT
) && decl
->Declaration
.Dimension
)
169 var
= rzalloc(b
->shader
, nir_variable
);
170 var
->data
.driver_location
= decl
->Range
.First
;
172 var
->type
= glsl_vec4_type();
174 var
->type
= glsl_array_type(var
->type
, array_size
);
177 case TGSI_FILE_INPUT
:
178 var
->data
.read_only
= true;
179 var
->data
.mode
= nir_var_shader_in
;
180 var
->name
= ralloc_asprintf(var
, "in_%d", decl
->Range
.First
);
182 /* We should probably translate to a VERT_ATTRIB_* or VARYING_SLOT_*
183 * instead, but nothing in NIR core is looking at the value
184 * currently, and this is less change to drivers.
186 var
->data
.location
= decl
->Semantic
.Name
;
187 var
->data
.index
= decl
->Semantic
.Index
;
189 /* We definitely need to translate the interpolation field, because
190 * nir_print will decode it.
192 switch (decl
->Interp
.Interpolate
) {
193 case TGSI_INTERPOLATE_CONSTANT
:
194 var
->data
.interpolation
= INTERP_QUALIFIER_FLAT
;
196 case TGSI_INTERPOLATE_LINEAR
:
197 var
->data
.interpolation
= INTERP_QUALIFIER_NOPERSPECTIVE
;
199 case TGSI_INTERPOLATE_PERSPECTIVE
:
200 var
->data
.interpolation
= INTERP_QUALIFIER_SMOOTH
;
204 exec_list_push_tail(&b
->shader
->inputs
, &var
->node
);
206 case TGSI_FILE_OUTPUT
: {
207 /* Since we can't load from outputs in the IR, we make temporaries
208 * for the outputs and emit stores to the real outputs at the end of
211 nir_register
*reg
= nir_local_reg_create(b
->impl
);
212 reg
->num_components
= 4;
214 reg
->num_array_elems
= array_size
;
216 var
->data
.mode
= nir_var_shader_out
;
217 var
->name
= ralloc_asprintf(var
, "out_%d", decl
->Range
.First
);
219 var
->data
.location
= decl
->Semantic
.Name
;
220 var
->data
.index
= decl
->Semantic
.Index
;
222 for (i
= 0; i
< array_size
; i
++) {
223 c
->output_regs
[decl
->Range
.First
+ i
].offset
= i
;
224 c
->output_regs
[decl
->Range
.First
+ i
].reg
= reg
;
227 exec_list_push_tail(&b
->shader
->outputs
, &var
->node
);
230 case TGSI_FILE_CONSTANT
:
231 var
->data
.mode
= nir_var_uniform
;
232 var
->name
= ralloc_asprintf(var
, "uniform_%d", decl
->Range
.First
);
234 exec_list_push_tail(&b
->shader
->uniforms
, &var
->node
);
237 unreachable("bad declaration file");
245 ttn_emit_immediate(struct ttn_compile
*c
)
247 nir_builder
*b
= &c
->build
;
248 struct tgsi_full_immediate
*tgsi_imm
= &c
->token
->FullImmediate
;
249 nir_load_const_instr
*load_const
;
252 load_const
= nir_load_const_instr_create(b
->shader
, 4);
253 c
->imm_defs
[c
->next_imm
] = &load_const
->def
;
256 for (i
= 0; i
< 4; i
++)
257 load_const
->value
.u
[i
] = tgsi_imm
->u
[i
].Uint
;
259 nir_instr_insert_after_cf_list(b
->cf_node_list
, &load_const
->instr
);
263 ttn_src_for_indirect(struct ttn_compile
*c
, struct tgsi_ind_register
*indirect
);
265 /* generate either a constant or indirect deref chain for accessing an
268 static nir_deref_var
*
269 ttn_array_deref(struct ttn_compile
*c
, nir_intrinsic_instr
*instr
,
270 nir_variable
*var
, unsigned offset
,
271 struct tgsi_ind_register
*indirect
)
273 nir_deref_var
*deref
= nir_deref_var_create(instr
, var
);
274 nir_deref_array
*arr
= nir_deref_array_create(deref
);
276 arr
->base_offset
= offset
;
277 arr
->deref
.type
= glsl_get_array_element(var
->type
);
280 arr
->deref_array_type
= nir_deref_array_type_indirect
;
281 arr
->indirect
= ttn_src_for_indirect(c
, indirect
);
283 arr
->deref_array_type
= nir_deref_array_type_direct
;
286 deref
->deref
.child
= &arr
->deref
;
292 ttn_src_for_file_and_index(struct ttn_compile
*c
, unsigned file
, unsigned index
,
293 struct tgsi_ind_register
*indirect
,
294 struct tgsi_dimension
*dim
,
295 struct tgsi_ind_register
*dimind
)
297 nir_builder
*b
= &c
->build
;
300 memset(&src
, 0, sizeof(src
));
303 case TGSI_FILE_TEMPORARY
:
304 if (c
->temp_regs
[index
].var
) {
305 unsigned offset
= c
->temp_regs
[index
].offset
;
306 nir_variable
*var
= c
->temp_regs
[index
].var
;
307 nir_intrinsic_instr
*load
;
309 load
= nir_intrinsic_instr_create(b
->shader
,
310 nir_intrinsic_load_var
);
311 load
->num_components
= 4;
312 load
->variables
[0] = ttn_array_deref(c
, load
, var
, offset
, indirect
);
314 nir_ssa_dest_init(&load
->instr
, &load
->dest
, 4, NULL
);
315 nir_instr_insert_after_cf_list(b
->cf_node_list
, &load
->instr
);
317 src
= nir_src_for_ssa(&load
->dest
.ssa
);
321 src
.reg
.reg
= c
->temp_regs
[index
].reg
;
326 case TGSI_FILE_ADDRESS
:
327 src
.reg
.reg
= c
->addr_reg
;
331 case TGSI_FILE_IMMEDIATE
:
332 src
= nir_src_for_ssa(c
->imm_defs
[index
]);
337 case TGSI_FILE_SYSTEM_VALUE
: {
338 nir_intrinsic_instr
*load
;
345 switch (c
->scan
->system_value_semantic_name
[index
]) {
346 case TGSI_SEMANTIC_VERTEXID_NOBASE
:
347 op
= nir_intrinsic_load_vertex_id_zero_base
;
349 case TGSI_SEMANTIC_VERTEXID
:
350 op
= nir_intrinsic_load_vertex_id
;
352 case TGSI_SEMANTIC_BASEVERTEX
:
353 op
= nir_intrinsic_load_base_vertex
;
355 case TGSI_SEMANTIC_INSTANCEID
:
356 op
= nir_intrinsic_load_instance_id
;
359 unreachable("bad system value");
362 load
= nir_intrinsic_instr_create(b
->shader
, op
);
363 load
->num_components
= ncomp
;
365 nir_ssa_dest_init(&load
->instr
, &load
->dest
, ncomp
, NULL
);
366 nir_instr_insert_after_cf_list(b
->cf_node_list
, &load
->instr
);
368 src
= nir_src_for_ssa(&load
->dest
.ssa
);
372 case TGSI_FILE_INPUT
:
373 case TGSI_FILE_CONSTANT
: {
374 nir_intrinsic_instr
*load
;
379 case TGSI_FILE_INPUT
:
380 op
= indirect
? nir_intrinsic_load_input_indirect
:
381 nir_intrinsic_load_input
;
384 case TGSI_FILE_CONSTANT
:
386 op
= indirect
? nir_intrinsic_load_ubo_indirect
:
387 nir_intrinsic_load_ubo
;
388 /* convert index from vec4 to byte: */
391 op
= indirect
? nir_intrinsic_load_uniform_indirect
:
392 nir_intrinsic_load_uniform
;
396 unreachable("No other load files supported");
400 load
= nir_intrinsic_instr_create(b
->shader
, op
);
402 load
->num_components
= 4;
403 load
->const_index
[0] = index
;
404 load
->const_index
[1] = 1;
408 ttn_src_for_file_and_index(c
, dimind
->File
, dimind
->Index
,
411 /* UBOs start at index 1 in TGSI: */
413 nir_src_for_ssa(nir_imm_int(b
, dim
->Index
- 1));
418 load
->src
[srcn
] = ttn_src_for_indirect(c
, indirect
);
420 assert(load
->src
[srcn
].is_ssa
);
421 /* we also need to covert vec4 to byte here too: */
423 nir_src_for_ssa(nir_ishl(b
, load
->src
[srcn
].ssa
,
428 nir_ssa_dest_init(&load
->instr
, &load
->dest
, 4, NULL
);
429 nir_instr_insert_after_cf_list(b
->cf_node_list
, &load
->instr
);
431 src
= nir_src_for_ssa(&load
->dest
.ssa
);
436 unreachable("bad src file");
444 ttn_src_for_indirect(struct ttn_compile
*c
, struct tgsi_ind_register
*indirect
)
446 nir_builder
*b
= &c
->build
;
448 memset(&src
, 0, sizeof(src
));
449 for (int i
= 0; i
< 4; i
++)
450 src
.swizzle
[i
] = indirect
->Swizzle
;
451 src
.src
= ttn_src_for_file_and_index(c
,
455 return nir_src_for_ssa(nir_imov_alu(b
, src
, 1));
459 ttn_get_dest(struct ttn_compile
*c
, struct tgsi_full_dst_register
*tgsi_fdst
)
461 struct tgsi_dst_register
*tgsi_dst
= &tgsi_fdst
->Register
;
463 unsigned index
= tgsi_dst
->Index
;
465 memset(&dest
, 0, sizeof(dest
));
467 if (tgsi_dst
->File
== TGSI_FILE_TEMPORARY
) {
468 if (c
->temp_regs
[index
].var
) {
469 nir_builder
*b
= &c
->build
;
470 nir_intrinsic_instr
*load
;
471 struct tgsi_ind_register
*indirect
=
472 tgsi_dst
->Indirect
? &tgsi_fdst
->Indirect
: NULL
;
475 /* this works, because TGSI will give us a base offset
476 * (in case of indirect index) that points back into
477 * the array. Access can be direct or indirect, we
478 * don't really care. Just create a one-shot dst reg
479 * that will get store_var'd back into the array var
480 * at the end of ttn_emit_instruction()
482 reg
= nir_local_reg_create(c
->build
.impl
);
483 reg
->num_components
= 4;
484 dest
.dest
.reg
.reg
= reg
;
485 dest
.dest
.reg
.base_offset
= 0;
487 /* since the alu op might not write to all components
488 * of the temporary, we must first do a load_var to
489 * get the previous array elements into the register.
490 * This is one area that NIR could use a bit of
491 * improvement (or opt pass to clean up the mess
492 * once things are scalarized)
495 load
= nir_intrinsic_instr_create(c
->build
.shader
,
496 nir_intrinsic_load_var
);
497 load
->num_components
= 4;
499 ttn_array_deref(c
, load
, c
->temp_regs
[index
].var
,
500 c
->temp_regs
[index
].offset
,
503 load
->dest
= nir_dest_for_reg(reg
);
505 nir_instr_insert_after_cf_list(b
->cf_node_list
, &load
->instr
);
507 assert(!tgsi_dst
->Indirect
);
508 dest
.dest
.reg
.reg
= c
->temp_regs
[index
].reg
;
509 dest
.dest
.reg
.base_offset
= c
->temp_regs
[index
].offset
;
511 } else if (tgsi_dst
->File
== TGSI_FILE_OUTPUT
) {
512 dest
.dest
.reg
.reg
= c
->output_regs
[index
].reg
;
513 dest
.dest
.reg
.base_offset
= c
->output_regs
[index
].offset
;
514 } else if (tgsi_dst
->File
== TGSI_FILE_ADDRESS
) {
516 dest
.dest
.reg
.reg
= c
->addr_reg
;
519 dest
.write_mask
= tgsi_dst
->WriteMask
;
520 dest
.saturate
= false;
522 if (tgsi_dst
->Indirect
&& (tgsi_dst
->File
!= TGSI_FILE_TEMPORARY
)) {
523 nir_src
*indirect
= ralloc(c
->build
.shader
, nir_src
);
524 *indirect
= ttn_src_for_indirect(c
, &tgsi_fdst
->Indirect
);
525 dest
.dest
.reg
.indirect
= indirect
;
531 static nir_variable
*
532 ttn_get_var(struct ttn_compile
*c
, struct tgsi_full_dst_register
*tgsi_fdst
)
534 struct tgsi_dst_register
*tgsi_dst
= &tgsi_fdst
->Register
;
535 unsigned index
= tgsi_dst
->Index
;
537 if (tgsi_dst
->File
== TGSI_FILE_TEMPORARY
) {
538 /* we should not have an indirect when there is no var! */
539 if (!c
->temp_regs
[index
].var
)
540 assert(!tgsi_dst
->Indirect
);
541 return c
->temp_regs
[index
].var
;
548 ttn_get_src(struct ttn_compile
*c
, struct tgsi_full_src_register
*tgsi_fsrc
)
550 nir_builder
*b
= &c
->build
;
551 struct tgsi_src_register
*tgsi_src
= &tgsi_fsrc
->Register
;
552 unsigned tgsi_opcode
= c
->token
->FullInstruction
.Instruction
.Opcode
;
553 unsigned tgsi_src_type
= tgsi_opcode_infer_src_type(tgsi_opcode
);
554 bool src_is_float
= !(tgsi_src_type
== TGSI_TYPE_SIGNED
||
555 tgsi_src_type
== TGSI_TYPE_UNSIGNED
);
558 memset(&src
, 0, sizeof(src
));
560 if (tgsi_src
->File
== TGSI_FILE_NULL
) {
561 return nir_imm_float(b
, 0.0);
562 } else if (tgsi_src
->File
== TGSI_FILE_SAMPLER
) {
563 /* Only the index of the sampler gets used in texturing, and it will
564 * handle looking that up on its own instead of using the nir_alu_src.
566 assert(!tgsi_src
->Indirect
);
569 struct tgsi_ind_register
*ind
= NULL
;
570 struct tgsi_dimension
*dim
= NULL
;
571 struct tgsi_ind_register
*dimind
= NULL
;
572 if (tgsi_src
->Indirect
)
573 ind
= &tgsi_fsrc
->Indirect
;
574 if (tgsi_src
->Dimension
) {
575 dim
= &tgsi_fsrc
->Dimension
;
577 dimind
= &tgsi_fsrc
->DimIndirect
;
579 src
.src
= ttn_src_for_file_and_index(c
,
585 src
.swizzle
[0] = tgsi_src
->SwizzleX
;
586 src
.swizzle
[1] = tgsi_src
->SwizzleY
;
587 src
.swizzle
[2] = tgsi_src
->SwizzleZ
;
588 src
.swizzle
[3] = tgsi_src
->SwizzleW
;
590 nir_ssa_def
*def
= nir_fmov_alu(b
, src
, 4);
592 if (tgsi_src
->Absolute
) {
594 def
= nir_fabs(b
, def
);
596 def
= nir_iabs(b
, def
);
599 if (tgsi_src
->Negate
) {
601 def
= nir_fneg(b
, def
);
603 def
= nir_ineg(b
, def
);
610 ttn_alu(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
612 unsigned num_srcs
= nir_op_infos
[op
].num_inputs
;
613 nir_alu_instr
*instr
= nir_alu_instr_create(b
->shader
, op
);
616 for (i
= 0; i
< num_srcs
; i
++)
617 instr
->src
[i
].src
= nir_src_for_ssa(src
[i
]);
620 nir_instr_insert_after_cf_list(b
->cf_node_list
, &instr
->instr
);
624 ttn_move_dest_masked(nir_builder
*b
, nir_alu_dest dest
,
625 nir_ssa_def
*def
, unsigned write_mask
)
627 if (!(dest
.write_mask
& write_mask
))
630 nir_alu_instr
*mov
= nir_alu_instr_create(b
->shader
, nir_op_imov
);
632 mov
->dest
.write_mask
&= write_mask
;
633 mov
->src
[0].src
= nir_src_for_ssa(def
);
634 for (unsigned i
= def
->num_components
; i
< 4; i
++)
635 mov
->src
[0].swizzle
[i
] = def
->num_components
- 1;
636 nir_instr_insert_after_cf_list(b
->cf_node_list
, &mov
->instr
);
640 ttn_move_dest(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
*def
)
642 ttn_move_dest_masked(b
, dest
, def
, TGSI_WRITEMASK_XYZW
);
646 ttn_arl(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
648 ttn_move_dest(b
, dest
, nir_f2i(b
, nir_ffloor(b
, src
[0])));
651 /* EXP - Approximate Exponential Base 2
652 * dst.x = 2^{\lfloor src.x\rfloor}
653 * dst.y = src.x - \lfloor src.x\rfloor
658 ttn_exp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
660 nir_ssa_def
*srcx
= ttn_channel(b
, src
[0], X
);
662 ttn_move_dest_masked(b
, dest
, nir_fexp2(b
, nir_ffloor(b
, srcx
)),
664 ttn_move_dest_masked(b
, dest
, nir_fsub(b
, srcx
, nir_ffloor(b
, srcx
)),
666 ttn_move_dest_masked(b
, dest
, nir_fexp2(b
, srcx
), TGSI_WRITEMASK_Z
);
667 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
670 /* LOG - Approximate Logarithm Base 2
671 * dst.x = \lfloor\log_2{|src.x|}\rfloor
672 * dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
673 * dst.z = \log_2{|src.x|}
677 ttn_log(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
679 nir_ssa_def
*abs_srcx
= nir_fabs(b
, ttn_channel(b
, src
[0], X
));
680 nir_ssa_def
*log2
= nir_flog2(b
, abs_srcx
);
682 ttn_move_dest_masked(b
, dest
, nir_ffloor(b
, log2
), TGSI_WRITEMASK_X
);
683 ttn_move_dest_masked(b
, dest
,
684 nir_fdiv(b
, abs_srcx
, nir_fexp2(b
, nir_ffloor(b
, log2
))),
686 ttn_move_dest_masked(b
, dest
, nir_flog2(b
, abs_srcx
), TGSI_WRITEMASK_Z
);
687 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
690 /* DST - Distance Vector
692 * dst.y = src0.y \times src1.y
697 ttn_dst(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
699 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_X
);
700 ttn_move_dest_masked(b
, dest
, nir_fmul(b
, src
[0], src
[1]), TGSI_WRITEMASK_Y
);
701 ttn_move_dest_masked(b
, dest
, nir_fmov(b
, src
[0]), TGSI_WRITEMASK_Z
);
702 ttn_move_dest_masked(b
, dest
, nir_fmov(b
, src
[1]), TGSI_WRITEMASK_W
);
705 /* LIT - Light Coefficients
707 * dst.y = max(src.x, 0.0)
708 * dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0))} : 0
712 ttn_lit(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
714 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_XW
);
716 ttn_move_dest_masked(b
, dest
, nir_fmax(b
, ttn_channel(b
, src
[0], X
),
717 nir_imm_float(b
, 0.0)), TGSI_WRITEMASK_Y
);
719 if (dest
.write_mask
& TGSI_WRITEMASK_Z
) {
720 nir_ssa_def
*src0_y
= ttn_channel(b
, src
[0], Y
);
721 nir_ssa_def
*wclamp
= nir_fmax(b
, nir_fmin(b
, ttn_channel(b
, src
[0], W
),
722 nir_imm_float(b
, 128.0)),
723 nir_imm_float(b
, -128.0));
724 nir_ssa_def
*pow
= nir_fpow(b
, nir_fmax(b
, src0_y
, nir_imm_float(b
, 0.0)),
727 ttn_move_dest_masked(b
, dest
,
730 nir_imm_float(b
, 0.0),
731 ttn_channel(b
, src
[0], X
)),
732 nir_imm_float(b
, 0.0),
739 * dst.x = \cos{src.x}
740 * dst.y = \sin{src.x}
745 ttn_scs(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
747 ttn_move_dest_masked(b
, dest
, nir_fcos(b
, ttn_channel(b
, src
[0], X
)),
749 ttn_move_dest_masked(b
, dest
, nir_fsin(b
, ttn_channel(b
, src
[0], X
)),
751 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 0.0), TGSI_WRITEMASK_Z
);
752 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
756 ttn_sle(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
758 ttn_move_dest(b
, dest
, nir_sge(b
, src
[1], src
[0]));
762 ttn_sgt(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
764 ttn_move_dest(b
, dest
, nir_slt(b
, src
[1], src
[0]));
768 ttn_clamp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
770 ttn_move_dest(b
, dest
, nir_fmin(b
, nir_fmax(b
, src
[0], src
[1]), src
[2]));
774 ttn_xpd(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
776 ttn_move_dest_masked(b
, dest
,
779 ttn_swizzle(b
, src
[0], Y
, Z
, X
, X
),
780 ttn_swizzle(b
, src
[1], Z
, X
, Y
, X
)),
782 ttn_swizzle(b
, src
[1], Y
, Z
, X
, X
),
783 ttn_swizzle(b
, src
[0], Z
, X
, Y
, X
))),
785 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
789 ttn_dp2a(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
791 ttn_move_dest(b
, dest
,
792 ttn_channel(b
, nir_fadd(b
, nir_fdot2(b
, src
[0], src
[1]),
798 ttn_dp2(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
800 ttn_move_dest(b
, dest
, nir_fdot2(b
, src
[0], src
[1]));
804 ttn_dp3(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
806 ttn_move_dest(b
, dest
, nir_fdot3(b
, src
[0], src
[1]));
810 ttn_dp4(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
812 ttn_move_dest(b
, dest
, nir_fdot4(b
, src
[0], src
[1]));
816 ttn_dph(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
818 ttn_move_dest(b
, dest
, nir_fadd(b
, nir_fdot3(b
, src
[0], src
[1]),
819 ttn_channel(b
, src
[1], W
)));
823 ttn_umad(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
825 ttn_move_dest(b
, dest
, nir_iadd(b
, nir_imul(b
, src
[0], src
[1]), src
[2]));
829 ttn_arr(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
831 ttn_move_dest(b
, dest
, nir_ffloor(b
, nir_fadd(b
, src
[0], nir_imm_float(b
, 0.5))));
835 ttn_cmp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
837 ttn_move_dest(b
, dest
, nir_bcsel(b
,
838 nir_flt(b
, src
[0], nir_imm_float(b
, 0.0)),
843 ttn_ucmp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
845 ttn_move_dest(b
, dest
, nir_bcsel(b
,
846 nir_ine(b
, src
[0], nir_imm_int(b
, 0)),
851 ttn_kill(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
853 nir_intrinsic_instr
*discard
=
854 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_discard
);
855 nir_instr_insert_after_cf_list(b
->cf_node_list
, &discard
->instr
);
859 ttn_kill_if(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
861 nir_ssa_def
*cmp
= nir_bany4(b
, nir_flt(b
, src
[0], nir_imm_float(b
, 0.0)));
862 nir_intrinsic_instr
*discard
=
863 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_discard_if
);
864 discard
->src
[0] = nir_src_for_ssa(cmp
);
865 nir_instr_insert_after_cf_list(b
->cf_node_list
, &discard
->instr
);
869 ttn_if(struct ttn_compile
*c
, nir_ssa_def
*src
, bool is_uint
)
871 nir_builder
*b
= &c
->build
;
873 /* Save the outside-of-the-if-statement node list. */
874 c
->if_stack
[c
->if_stack_pos
] = b
->cf_node_list
;
877 src
= ttn_channel(b
, src
, X
);
879 nir_if
*if_stmt
= nir_if_create(b
->shader
);
881 if_stmt
->condition
= nir_src_for_ssa(nir_ine(b
, src
, nir_imm_int(b
, 0)));
883 if_stmt
->condition
= nir_src_for_ssa(nir_fne(b
, src
, nir_imm_int(b
, 0)));
885 nir_cf_node_insert_end(b
->cf_node_list
, &if_stmt
->cf_node
);
887 nir_builder_insert_after_cf_list(b
, &if_stmt
->then_list
);
889 c
->if_stack
[c
->if_stack_pos
] = &if_stmt
->else_list
;
894 ttn_else(struct ttn_compile
*c
)
896 nir_builder
*b
= &c
->build
;
898 nir_builder_insert_after_cf_list(b
, c
->if_stack
[c
->if_stack_pos
- 1]);
902 ttn_endif(struct ttn_compile
*c
)
904 nir_builder
*b
= &c
->build
;
906 c
->if_stack_pos
-= 2;
907 nir_builder_insert_after_cf_list(b
, c
->if_stack
[c
->if_stack_pos
]);
911 ttn_bgnloop(struct ttn_compile
*c
)
913 nir_builder
*b
= &c
->build
;
915 /* Save the outside-of-the-loop node list. */
916 c
->loop_stack
[c
->loop_stack_pos
] = b
->cf_node_list
;
919 nir_loop
*loop
= nir_loop_create(b
->shader
);
920 nir_cf_node_insert_end(b
->cf_node_list
, &loop
->cf_node
);
922 nir_builder_insert_after_cf_list(b
, &loop
->body
);
926 ttn_cont(nir_builder
*b
)
928 nir_jump_instr
*instr
= nir_jump_instr_create(b
->shader
, nir_jump_continue
);
929 nir_instr_insert_after_cf_list(b
->cf_node_list
, &instr
->instr
);
933 ttn_brk(nir_builder
*b
)
935 nir_jump_instr
*instr
= nir_jump_instr_create(b
->shader
, nir_jump_break
);
936 nir_instr_insert_after_cf_list(b
->cf_node_list
, &instr
->instr
);
940 ttn_endloop(struct ttn_compile
*c
)
942 nir_builder
*b
= &c
->build
;
945 nir_builder_insert_after_cf_list(b
, c
->loop_stack
[c
->loop_stack_pos
]);
949 setup_texture_info(nir_tex_instr
*instr
, unsigned texture
)
952 case TGSI_TEXTURE_1D
:
953 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
955 case TGSI_TEXTURE_1D_ARRAY
:
956 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
957 instr
->is_array
= true;
959 case TGSI_TEXTURE_SHADOW1D
:
960 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
961 instr
->is_shadow
= true;
963 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
964 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
965 instr
->is_shadow
= true;
966 instr
->is_array
= true;
968 case TGSI_TEXTURE_2D
:
969 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
971 case TGSI_TEXTURE_2D_ARRAY
:
972 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
973 instr
->is_array
= true;
975 case TGSI_TEXTURE_2D_MSAA
:
976 instr
->sampler_dim
= GLSL_SAMPLER_DIM_MS
;
978 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
979 instr
->sampler_dim
= GLSL_SAMPLER_DIM_MS
;
980 instr
->is_array
= true;
982 case TGSI_TEXTURE_SHADOW2D
:
983 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
984 instr
->is_shadow
= true;
986 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
987 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
988 instr
->is_shadow
= true;
989 instr
->is_array
= true;
991 case TGSI_TEXTURE_3D
:
992 instr
->sampler_dim
= GLSL_SAMPLER_DIM_3D
;
994 case TGSI_TEXTURE_CUBE
:
995 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
997 case TGSI_TEXTURE_CUBE_ARRAY
:
998 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
999 instr
->is_array
= true;
1001 case TGSI_TEXTURE_SHADOWCUBE
:
1002 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1003 instr
->is_shadow
= true;
1005 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
1006 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1007 instr
->is_shadow
= true;
1008 instr
->is_array
= true;
1010 case TGSI_TEXTURE_RECT
:
1011 instr
->sampler_dim
= GLSL_SAMPLER_DIM_RECT
;
1013 case TGSI_TEXTURE_SHADOWRECT
:
1014 instr
->sampler_dim
= GLSL_SAMPLER_DIM_RECT
;
1015 instr
->is_shadow
= true;
1018 fprintf(stderr
, "Unknown TGSI texture target %d\n", texture
);
1024 ttn_tex(struct ttn_compile
*c
, nir_alu_dest dest
, nir_ssa_def
**src
)
1026 nir_builder
*b
= &c
->build
;
1027 struct tgsi_full_instruction
*tgsi_inst
= &c
->token
->FullInstruction
;
1028 nir_tex_instr
*instr
;
1030 unsigned num_srcs
, samp
= 1, i
;
1032 switch (tgsi_inst
->Instruction
.Opcode
) {
1033 case TGSI_OPCODE_TEX
:
1037 case TGSI_OPCODE_TXP
:
1041 case TGSI_OPCODE_TXB
:
1045 case TGSI_OPCODE_TXL
:
1049 case TGSI_OPCODE_TXL2
:
1054 case TGSI_OPCODE_TXF
:
1058 case TGSI_OPCODE_TXD
:
1065 fprintf(stderr
, "unknown TGSI tex op %d\n", tgsi_inst
->Instruction
.Opcode
);
1069 if (tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D
||
1070 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D_ARRAY
||
1071 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D
||
1072 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D_ARRAY
||
1073 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWRECT
||
1074 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
1075 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
) {
1079 num_srcs
+= tgsi_inst
->Texture
.NumOffsets
;
1081 instr
= nir_tex_instr_create(b
->shader
, num_srcs
);
1084 setup_texture_info(instr
, tgsi_inst
->Texture
.Texture
);
1086 switch (instr
->sampler_dim
) {
1087 case GLSL_SAMPLER_DIM_1D
:
1088 case GLSL_SAMPLER_DIM_BUF
:
1089 instr
->coord_components
= 1;
1091 case GLSL_SAMPLER_DIM_2D
:
1092 case GLSL_SAMPLER_DIM_RECT
:
1093 case GLSL_SAMPLER_DIM_EXTERNAL
:
1094 case GLSL_SAMPLER_DIM_MS
:
1095 instr
->coord_components
= 2;
1097 case GLSL_SAMPLER_DIM_3D
:
1098 case GLSL_SAMPLER_DIM_CUBE
:
1099 instr
->coord_components
= 3;
1103 if (instr
->is_array
)
1104 instr
->coord_components
++;
1106 assert(tgsi_inst
->Src
[samp
].Register
.File
== TGSI_FILE_SAMPLER
);
1107 instr
->sampler_index
= tgsi_inst
->Src
[samp
].Register
.Index
;
1109 unsigned src_number
= 0;
1111 instr
->src
[src_number
].src
=
1112 nir_src_for_ssa(nir_swizzle(b
, src
[0], SWIZ(X
, Y
, Z
, W
),
1113 instr
->coord_components
, false));
1114 instr
->src
[src_number
].src_type
= nir_tex_src_coord
;
1117 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXP
) {
1118 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1119 instr
->src
[src_number
].src_type
= nir_tex_src_projector
;
1123 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB
) {
1124 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1125 instr
->src
[src_number
].src_type
= nir_tex_src_bias
;
1129 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
) {
1130 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1131 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
1135 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL2
) {
1136 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[1], X
));
1137 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
1141 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXF
) {
1142 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1143 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
1147 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXD
) {
1148 instr
->src
[src_number
].src
=
1149 nir_src_for_ssa(nir_swizzle(b
, src
[1], SWIZ(X
, Y
, Z
, W
),
1150 instr
->coord_components
, false));
1151 instr
->src
[src_number
].src_type
= nir_tex_src_ddx
;
1153 instr
->src
[src_number
].src
=
1154 nir_src_for_ssa(nir_swizzle(b
, src
[2], SWIZ(X
, Y
, Z
, W
),
1155 instr
->coord_components
, false));
1156 instr
->src
[src_number
].src_type
= nir_tex_src_ddy
;
1160 if (instr
->is_shadow
) {
1161 if (instr
->coord_components
< 3)
1162 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], Z
));
1164 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1166 instr
->src
[src_number
].src_type
= nir_tex_src_comparitor
;
1170 for (i
= 0; i
< tgsi_inst
->Texture
.NumOffsets
; i
++) {
1171 struct tgsi_texture_offset
*tex_offset
= &tgsi_inst
->TexOffsets
[i
];
1172 /* since TexOffset ins't using tgsi_full_src_register we get to
1173 * do some extra gymnastics:
1177 memset(&src
, 0, sizeof(src
));
1179 src
.src
= ttn_src_for_file_and_index(c
,
1184 src
.swizzle
[0] = tex_offset
->SwizzleX
;
1185 src
.swizzle
[1] = tex_offset
->SwizzleY
;
1186 src
.swizzle
[2] = tex_offset
->SwizzleZ
;
1187 src
.swizzle
[3] = TGSI_SWIZZLE_W
;
1189 instr
->src
[src_number
].src_type
= nir_tex_src_offset
;
1190 instr
->src
[src_number
].src
= nir_src_for_ssa(
1191 nir_fmov_alu(b
, src
, nir_tex_instr_src_size(instr
, src_number
)));
1195 assert(src_number
== num_srcs
);
1197 nir_ssa_dest_init(&instr
->instr
, &instr
->dest
, 4, NULL
);
1198 nir_instr_insert_after_cf_list(b
->cf_node_list
, &instr
->instr
);
1200 /* Resolve the writemask on the texture op. */
1201 ttn_move_dest(b
, dest
, &instr
->dest
.ssa
);
1204 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1206 * dst.x = texture\_width(unit, lod)
1207 * dst.y = texture\_height(unit, lod)
1208 * dst.z = texture\_depth(unit, lod)
1209 * dst.w = texture\_levels(unit)
1211 * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
1214 ttn_txq(struct ttn_compile
*c
, nir_alu_dest dest
, nir_ssa_def
**src
)
1216 nir_builder
*b
= &c
->build
;
1217 struct tgsi_full_instruction
*tgsi_inst
= &c
->token
->FullInstruction
;
1218 nir_tex_instr
*txs
, *qlv
;
1220 txs
= nir_tex_instr_create(b
->shader
, 1);
1221 txs
->op
= nir_texop_txs
;
1222 setup_texture_info(txs
, tgsi_inst
->Texture
.Texture
);
1224 qlv
= nir_tex_instr_create(b
->shader
, 0);
1225 qlv
->op
= nir_texop_query_levels
;
1226 setup_texture_info(qlv
, tgsi_inst
->Texture
.Texture
);
1228 assert(tgsi_inst
->Src
[1].Register
.File
== TGSI_FILE_SAMPLER
);
1229 txs
->sampler_index
= tgsi_inst
->Src
[1].Register
.Index
;
1230 qlv
->sampler_index
= tgsi_inst
->Src
[1].Register
.Index
;
1232 /* only single src, the lod: */
1233 txs
->src
[0].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], X
));
1234 txs
->src
[0].src_type
= nir_tex_src_lod
;
1236 nir_ssa_dest_init(&txs
->instr
, &txs
->dest
, 3, NULL
);
1237 nir_instr_insert_after_cf_list(b
->cf_node_list
, &txs
->instr
);
1239 nir_ssa_dest_init(&qlv
->instr
, &qlv
->dest
, 1, NULL
);
1240 nir_instr_insert_after_cf_list(b
->cf_node_list
, &qlv
->instr
);
1242 ttn_move_dest_masked(b
, dest
, &txs
->dest
.ssa
, TGSI_WRITEMASK_XYZ
);
1243 ttn_move_dest_masked(b
, dest
, &qlv
->dest
.ssa
, TGSI_WRITEMASK_W
);
1246 static const nir_op op_trans
[TGSI_OPCODE_LAST
] = {
1247 [TGSI_OPCODE_ARL
] = 0,
1248 [TGSI_OPCODE_MOV
] = nir_op_fmov
,
1249 [TGSI_OPCODE_LIT
] = 0,
1250 [TGSI_OPCODE_RCP
] = nir_op_frcp
,
1251 [TGSI_OPCODE_RSQ
] = nir_op_frsq
,
1252 [TGSI_OPCODE_EXP
] = 0,
1253 [TGSI_OPCODE_LOG
] = 0,
1254 [TGSI_OPCODE_MUL
] = nir_op_fmul
,
1255 [TGSI_OPCODE_ADD
] = nir_op_fadd
,
1256 [TGSI_OPCODE_DP3
] = 0,
1257 [TGSI_OPCODE_DP4
] = 0,
1258 [TGSI_OPCODE_DST
] = 0,
1259 [TGSI_OPCODE_MIN
] = nir_op_fmin
,
1260 [TGSI_OPCODE_MAX
] = nir_op_fmax
,
1261 [TGSI_OPCODE_SLT
] = nir_op_slt
,
1262 [TGSI_OPCODE_SGE
] = nir_op_sge
,
1263 [TGSI_OPCODE_MAD
] = nir_op_ffma
,
1264 [TGSI_OPCODE_SUB
] = nir_op_fsub
,
1265 [TGSI_OPCODE_LRP
] = 0,
1266 [TGSI_OPCODE_SQRT
] = nir_op_fsqrt
,
1267 [TGSI_OPCODE_DP2A
] = 0,
1268 [TGSI_OPCODE_FRC
] = nir_op_ffract
,
1269 [TGSI_OPCODE_CLAMP
] = 0,
1270 [TGSI_OPCODE_FLR
] = nir_op_ffloor
,
1271 [TGSI_OPCODE_ROUND
] = nir_op_fround_even
,
1272 [TGSI_OPCODE_EX2
] = nir_op_fexp2
,
1273 [TGSI_OPCODE_LG2
] = nir_op_flog2
,
1274 [TGSI_OPCODE_POW
] = nir_op_fpow
,
1275 [TGSI_OPCODE_XPD
] = 0,
1276 [TGSI_OPCODE_ABS
] = nir_op_fabs
,
1277 [TGSI_OPCODE_DPH
] = 0,
1278 [TGSI_OPCODE_COS
] = nir_op_fcos
,
1279 [TGSI_OPCODE_DDX
] = nir_op_fddx
,
1280 [TGSI_OPCODE_DDY
] = nir_op_fddy
,
1281 [TGSI_OPCODE_KILL
] = 0,
1282 [TGSI_OPCODE_PK2H
] = 0, /* XXX */
1283 [TGSI_OPCODE_PK2US
] = 0, /* XXX */
1284 [TGSI_OPCODE_PK4B
] = 0, /* XXX */
1285 [TGSI_OPCODE_PK4UB
] = 0, /* XXX */
1286 [TGSI_OPCODE_SEQ
] = nir_op_seq
,
1287 [TGSI_OPCODE_SGT
] = 0,
1288 [TGSI_OPCODE_SIN
] = nir_op_fsin
,
1289 [TGSI_OPCODE_SLE
] = 0,
1290 [TGSI_OPCODE_TEX
] = 0,
1291 [TGSI_OPCODE_TXD
] = 0,
1292 [TGSI_OPCODE_TXP
] = 0,
1293 [TGSI_OPCODE_UP2H
] = 0, /* XXX */
1294 [TGSI_OPCODE_UP2US
] = 0, /* XXX */
1295 [TGSI_OPCODE_UP4B
] = 0, /* XXX */
1296 [TGSI_OPCODE_UP4UB
] = 0, /* XXX */
1297 [TGSI_OPCODE_ARR
] = 0,
1299 /* No function calls, yet. */
1300 [TGSI_OPCODE_CAL
] = 0, /* XXX */
1301 [TGSI_OPCODE_RET
] = 0, /* XXX */
1303 [TGSI_OPCODE_SSG
] = nir_op_fsign
,
1304 [TGSI_OPCODE_CMP
] = 0,
1305 [TGSI_OPCODE_SCS
] = 0,
1306 [TGSI_OPCODE_TXB
] = 0,
1307 [TGSI_OPCODE_DIV
] = nir_op_fdiv
,
1308 [TGSI_OPCODE_DP2
] = 0,
1309 [TGSI_OPCODE_DP2A
] = 0,
1310 [TGSI_OPCODE_TXL
] = 0,
1312 [TGSI_OPCODE_BRK
] = 0,
1313 [TGSI_OPCODE_IF
] = 0,
1314 [TGSI_OPCODE_UIF
] = 0,
1315 [TGSI_OPCODE_ELSE
] = 0,
1316 [TGSI_OPCODE_ENDIF
] = 0,
1318 [TGSI_OPCODE_DDX_FINE
] = nir_op_fddx_fine
,
1319 [TGSI_OPCODE_DDY_FINE
] = nir_op_fddy_fine
,
1321 [TGSI_OPCODE_PUSHA
] = 0, /* XXX */
1322 [TGSI_OPCODE_POPA
] = 0, /* XXX */
1324 [TGSI_OPCODE_CEIL
] = nir_op_fceil
,
1325 [TGSI_OPCODE_I2F
] = nir_op_i2f
,
1326 [TGSI_OPCODE_NOT
] = nir_op_inot
,
1327 [TGSI_OPCODE_TRUNC
] = nir_op_ftrunc
,
1328 [TGSI_OPCODE_SHL
] = nir_op_ishl
,
1329 [TGSI_OPCODE_AND
] = nir_op_iand
,
1330 [TGSI_OPCODE_OR
] = nir_op_ior
,
1331 [TGSI_OPCODE_MOD
] = nir_op_umod
,
1332 [TGSI_OPCODE_XOR
] = nir_op_ixor
,
1333 [TGSI_OPCODE_SAD
] = 0, /* XXX */
1334 [TGSI_OPCODE_TXF
] = 0,
1335 [TGSI_OPCODE_TXQ
] = 0,
1337 [TGSI_OPCODE_CONT
] = 0,
1339 [TGSI_OPCODE_EMIT
] = 0, /* XXX */
1340 [TGSI_OPCODE_ENDPRIM
] = 0, /* XXX */
1342 [TGSI_OPCODE_BGNLOOP
] = 0,
1343 [TGSI_OPCODE_BGNSUB
] = 0, /* XXX: no function calls */
1344 [TGSI_OPCODE_ENDLOOP
] = 0,
1345 [TGSI_OPCODE_ENDSUB
] = 0, /* XXX: no function calls */
1347 [TGSI_OPCODE_TXQ_LZ
] = 0,
1348 [TGSI_OPCODE_NOP
] = 0,
1349 [TGSI_OPCODE_FSEQ
] = nir_op_feq
,
1350 [TGSI_OPCODE_FSGE
] = nir_op_fge
,
1351 [TGSI_OPCODE_FSLT
] = nir_op_flt
,
1352 [TGSI_OPCODE_FSNE
] = nir_op_fne
,
1354 /* No control flow yet */
1355 [TGSI_OPCODE_CALLNZ
] = 0, /* XXX */
1356 [TGSI_OPCODE_BREAKC
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1358 [TGSI_OPCODE_KILL_IF
] = 0,
1360 [TGSI_OPCODE_END
] = 0,
1362 [TGSI_OPCODE_F2I
] = nir_op_f2i
,
1363 [TGSI_OPCODE_IDIV
] = nir_op_idiv
,
1364 [TGSI_OPCODE_IMAX
] = nir_op_imax
,
1365 [TGSI_OPCODE_IMIN
] = nir_op_imin
,
1366 [TGSI_OPCODE_INEG
] = nir_op_ineg
,
1367 [TGSI_OPCODE_ISGE
] = nir_op_ige
,
1368 [TGSI_OPCODE_ISHR
] = nir_op_ishr
,
1369 [TGSI_OPCODE_ISLT
] = nir_op_ilt
,
1370 [TGSI_OPCODE_F2U
] = nir_op_f2u
,
1371 [TGSI_OPCODE_U2F
] = nir_op_u2f
,
1372 [TGSI_OPCODE_UADD
] = nir_op_iadd
,
1373 [TGSI_OPCODE_UDIV
] = nir_op_udiv
,
1374 [TGSI_OPCODE_UMAD
] = 0,
1375 [TGSI_OPCODE_UMAX
] = nir_op_umax
,
1376 [TGSI_OPCODE_UMIN
] = nir_op_umin
,
1377 [TGSI_OPCODE_UMOD
] = nir_op_umod
,
1378 [TGSI_OPCODE_UMUL
] = nir_op_imul
,
1379 [TGSI_OPCODE_USEQ
] = nir_op_ieq
,
1380 [TGSI_OPCODE_USGE
] = nir_op_uge
,
1381 [TGSI_OPCODE_USHR
] = nir_op_ushr
,
1382 [TGSI_OPCODE_USLT
] = nir_op_ult
,
1383 [TGSI_OPCODE_USNE
] = nir_op_ine
,
1385 [TGSI_OPCODE_SWITCH
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1386 [TGSI_OPCODE_CASE
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1387 [TGSI_OPCODE_DEFAULT
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1388 [TGSI_OPCODE_ENDSWITCH
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1390 /* XXX: SAMPLE opcodes */
1392 [TGSI_OPCODE_UARL
] = nir_op_imov
,
1393 [TGSI_OPCODE_UCMP
] = 0,
1394 [TGSI_OPCODE_IABS
] = nir_op_iabs
,
1395 [TGSI_OPCODE_ISSG
] = nir_op_isign
,
1399 [TGSI_OPCODE_TEX2
] = 0,
1400 [TGSI_OPCODE_TXB2
] = 0,
1401 [TGSI_OPCODE_TXL2
] = 0,
1403 [TGSI_OPCODE_IMUL_HI
] = nir_op_imul_high
,
1404 [TGSI_OPCODE_UMUL_HI
] = nir_op_umul_high
,
1406 [TGSI_OPCODE_TG4
] = 0,
1407 [TGSI_OPCODE_LODQ
] = 0, /* XXX */
1409 [TGSI_OPCODE_IBFE
] = nir_op_ibitfield_extract
,
1410 [TGSI_OPCODE_UBFE
] = nir_op_ubitfield_extract
,
1411 [TGSI_OPCODE_BFI
] = nir_op_bitfield_insert
,
1412 [TGSI_OPCODE_BREV
] = nir_op_bitfield_reverse
,
1413 [TGSI_OPCODE_POPC
] = nir_op_bit_count
,
1414 [TGSI_OPCODE_LSB
] = nir_op_find_lsb
,
1415 [TGSI_OPCODE_IMSB
] = nir_op_ifind_msb
,
1416 [TGSI_OPCODE_UMSB
] = nir_op_ifind_msb
, /* XXX: signed vs unsigned */
1418 [TGSI_OPCODE_INTERP_CENTROID
] = 0, /* XXX */
1419 [TGSI_OPCODE_INTERP_SAMPLE
] = 0, /* XXX */
1420 [TGSI_OPCODE_INTERP_OFFSET
] = 0, /* XXX */
1424 ttn_emit_instruction(struct ttn_compile
*c
)
1426 nir_builder
*b
= &c
->build
;
1427 struct tgsi_full_instruction
*tgsi_inst
= &c
->token
->FullInstruction
;
1429 unsigned tgsi_op
= tgsi_inst
->Instruction
.Opcode
;
1430 struct tgsi_full_dst_register
*tgsi_dst
= &tgsi_inst
->Dst
[0];
1432 if (tgsi_op
== TGSI_OPCODE_END
)
1435 nir_ssa_def
*src
[TGSI_FULL_MAX_SRC_REGISTERS
];
1436 for (i
= 0; i
< TGSI_FULL_MAX_SRC_REGISTERS
; i
++) {
1437 src
[i
] = ttn_get_src(c
, &tgsi_inst
->Src
[i
]);
1439 nir_alu_dest dest
= ttn_get_dest(c
, tgsi_dst
);
1442 case TGSI_OPCODE_RSQ
:
1443 ttn_move_dest(b
, dest
, nir_frsq(b
, ttn_channel(b
, src
[0], X
)));
1446 case TGSI_OPCODE_SQRT
:
1447 ttn_move_dest(b
, dest
, nir_fsqrt(b
, ttn_channel(b
, src
[0], X
)));
1450 case TGSI_OPCODE_RCP
:
1451 ttn_move_dest(b
, dest
, nir_frcp(b
, ttn_channel(b
, src
[0], X
)));
1454 case TGSI_OPCODE_EX2
:
1455 ttn_move_dest(b
, dest
, nir_fexp2(b
, ttn_channel(b
, src
[0], X
)));
1458 case TGSI_OPCODE_LG2
:
1459 ttn_move_dest(b
, dest
, nir_flog2(b
, ttn_channel(b
, src
[0], X
)));
1462 case TGSI_OPCODE_POW
:
1463 ttn_move_dest(b
, dest
, nir_fpow(b
,
1464 ttn_channel(b
, src
[0], X
),
1465 ttn_channel(b
, src
[1], X
)));
1468 case TGSI_OPCODE_COS
:
1469 ttn_move_dest(b
, dest
, nir_fcos(b
, ttn_channel(b
, src
[0], X
)));
1472 case TGSI_OPCODE_SIN
:
1473 ttn_move_dest(b
, dest
, nir_fsin(b
, ttn_channel(b
, src
[0], X
)));
1476 case TGSI_OPCODE_ARL
:
1477 ttn_arl(b
, op_trans
[tgsi_op
], dest
, src
);
1480 case TGSI_OPCODE_EXP
:
1481 ttn_exp(b
, op_trans
[tgsi_op
], dest
, src
);
1484 case TGSI_OPCODE_LOG
:
1485 ttn_log(b
, op_trans
[tgsi_op
], dest
, src
);
1488 case TGSI_OPCODE_DST
:
1489 ttn_dst(b
, op_trans
[tgsi_op
], dest
, src
);
1492 case TGSI_OPCODE_LIT
:
1493 ttn_lit(b
, op_trans
[tgsi_op
], dest
, src
);
1496 case TGSI_OPCODE_CLAMP
:
1497 ttn_clamp(b
, op_trans
[tgsi_op
], dest
, src
);
1500 case TGSI_OPCODE_XPD
:
1501 ttn_xpd(b
, op_trans
[tgsi_op
], dest
, src
);
1504 case TGSI_OPCODE_DP2
:
1505 ttn_dp2(b
, op_trans
[tgsi_op
], dest
, src
);
1508 case TGSI_OPCODE_DP3
:
1509 ttn_dp3(b
, op_trans
[tgsi_op
], dest
, src
);
1512 case TGSI_OPCODE_DP4
:
1513 ttn_dp4(b
, op_trans
[tgsi_op
], dest
, src
);
1516 case TGSI_OPCODE_DP2A
:
1517 ttn_dp2a(b
, op_trans
[tgsi_op
], dest
, src
);
1520 case TGSI_OPCODE_DPH
:
1521 ttn_dph(b
, op_trans
[tgsi_op
], dest
, src
);
1524 case TGSI_OPCODE_UMAD
:
1525 ttn_umad(b
, op_trans
[tgsi_op
], dest
, src
);
1528 case TGSI_OPCODE_LRP
:
1529 ttn_move_dest(b
, dest
, nir_flrp(b
, src
[2], src
[1], src
[0]));
1532 case TGSI_OPCODE_KILL
:
1533 ttn_kill(b
, op_trans
[tgsi_op
], dest
, src
);
1536 case TGSI_OPCODE_ARR
:
1537 ttn_arr(b
, op_trans
[tgsi_op
], dest
, src
);
1540 case TGSI_OPCODE_CMP
:
1541 ttn_cmp(b
, op_trans
[tgsi_op
], dest
, src
);
1544 case TGSI_OPCODE_UCMP
:
1545 ttn_ucmp(b
, op_trans
[tgsi_op
], dest
, src
);
1548 case TGSI_OPCODE_SCS
:
1549 ttn_scs(b
, op_trans
[tgsi_op
], dest
, src
);
1552 case TGSI_OPCODE_SGT
:
1553 ttn_sgt(b
, op_trans
[tgsi_op
], dest
, src
);
1556 case TGSI_OPCODE_SLE
:
1557 ttn_sle(b
, op_trans
[tgsi_op
], dest
, src
);
1560 case TGSI_OPCODE_KILL_IF
:
1561 ttn_kill_if(b
, op_trans
[tgsi_op
], dest
, src
);
1564 case TGSI_OPCODE_TEX
:
1565 case TGSI_OPCODE_TXP
:
1566 case TGSI_OPCODE_TXL
:
1567 case TGSI_OPCODE_TXB
:
1568 case TGSI_OPCODE_TXD
:
1569 case TGSI_OPCODE_TXL2
:
1570 case TGSI_OPCODE_TXB2
:
1571 case TGSI_OPCODE_TXQ_LZ
:
1572 case TGSI_OPCODE_TXF
:
1573 case TGSI_OPCODE_TG4
:
1574 ttn_tex(c
, dest
, src
);
1577 case TGSI_OPCODE_TXQ
:
1578 ttn_txq(c
, dest
, src
);
1581 case TGSI_OPCODE_NOP
:
1584 case TGSI_OPCODE_IF
:
1585 ttn_if(c
, src
[0], false);
1588 case TGSI_OPCODE_UIF
:
1589 ttn_if(c
, src
[0], true);
1592 case TGSI_OPCODE_ELSE
:
1596 case TGSI_OPCODE_ENDIF
:
1600 case TGSI_OPCODE_BGNLOOP
:
1604 case TGSI_OPCODE_BRK
:
1608 case TGSI_OPCODE_CONT
:
1612 case TGSI_OPCODE_ENDLOOP
:
1617 if (op_trans
[tgsi_op
] != 0 || tgsi_op
== TGSI_OPCODE_MOV
) {
1618 ttn_alu(b
, op_trans
[tgsi_op
], dest
, src
);
1620 fprintf(stderr
, "unknown TGSI opcode: %s\n",
1621 tgsi_get_opcode_name(tgsi_op
));
1627 if (tgsi_inst
->Instruction
.Saturate
) {
1628 assert(!dest
.dest
.is_ssa
);
1629 ttn_move_dest(b
, dest
, nir_fsat(b
, ttn_src_for_dest(b
, &dest
)));
1632 /* if the dst has a matching var, append store_global to move
1633 * output from reg to var
1635 nir_variable
*var
= ttn_get_var(c
, tgsi_dst
);
1637 unsigned index
= tgsi_dst
->Register
.Index
;
1638 unsigned offset
= c
->temp_regs
[index
].offset
;
1639 nir_intrinsic_instr
*store
=
1640 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_store_var
);
1641 struct tgsi_ind_register
*indirect
= tgsi_dst
->Register
.Indirect
?
1642 &tgsi_dst
->Indirect
: NULL
;
1644 store
->num_components
= 4;
1645 store
->variables
[0] = ttn_array_deref(c
, store
, var
, offset
, indirect
);
1646 store
->src
[0] = nir_src_for_reg(dest
.dest
.reg
.reg
);
1648 nir_instr_insert_after_cf_list(b
->cf_node_list
, &store
->instr
);
1653 * Puts a NIR intrinsic to store of each TGSI_FILE_OUTPUT value to the output
1654 * variables at the end of the shader.
1656 * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
1657 * written, because there's no output load intrinsic, which means we couldn't
1658 * handle writemasks.
1661 ttn_add_output_stores(struct ttn_compile
*c
)
1663 nir_builder
*b
= &c
->build
;
1665 foreach_list_typed(nir_variable
, var
, node
, &b
->shader
->outputs
) {
1666 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1669 for (i
= 0; i
< array_len
; i
++) {
1670 nir_intrinsic_instr
*store
=
1671 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_store_output
);
1672 store
->num_components
= 4;
1673 store
->const_index
[0] = var
->data
.driver_location
+ i
;
1674 store
->const_index
[1] = 1;
1675 store
->src
[0].reg
.reg
= c
->output_regs
[var
->data
.driver_location
].reg
;
1676 nir_instr_insert_after_cf_list(b
->cf_node_list
, &store
->instr
);
1682 tgsi_to_nir(const void *tgsi_tokens
,
1683 const nir_shader_compiler_options
*options
)
1685 struct tgsi_parse_context parser
;
1686 struct tgsi_shader_info scan
;
1687 struct ttn_compile
*c
;
1688 struct nir_shader
*s
;
1691 c
= rzalloc(NULL
, struct ttn_compile
);
1692 s
= nir_shader_create(NULL
, options
);
1694 nir_function
*func
= nir_function_create(s
, "main");
1695 nir_function_overload
*overload
= nir_function_overload_create(func
);
1696 nir_function_impl
*impl
= nir_function_impl_create(overload
);
1698 nir_builder_init(&c
->build
, impl
);
1699 nir_builder_insert_after_cf_list(&c
->build
, &impl
->body
);
1701 tgsi_scan_shader(tgsi_tokens
, &scan
);
1704 s
->num_inputs
= scan
.file_max
[TGSI_FILE_INPUT
] + 1;
1705 s
->num_uniforms
= scan
.const_file_max
[0] + 1;
1706 s
->num_outputs
= scan
.file_max
[TGSI_FILE_OUTPUT
] + 1;
1708 c
->output_regs
= rzalloc_array(c
, struct ttn_reg_info
,
1709 scan
.file_max
[TGSI_FILE_OUTPUT
] + 1);
1710 c
->temp_regs
= rzalloc_array(c
, struct ttn_reg_info
,
1711 scan
.file_max
[TGSI_FILE_TEMPORARY
] + 1);
1712 c
->imm_defs
= rzalloc_array(c
, nir_ssa_def
*,
1713 scan
.file_max
[TGSI_FILE_IMMEDIATE
] + 1);
1715 c
->if_stack
= rzalloc_array(c
, struct exec_list
*,
1716 (scan
.opcode_count
[TGSI_OPCODE_IF
] +
1717 scan
.opcode_count
[TGSI_OPCODE_UIF
]) * 2);
1718 c
->loop_stack
= rzalloc_array(c
, struct exec_list
*,
1719 scan
.opcode_count
[TGSI_OPCODE_BGNLOOP
]);
1721 ret
= tgsi_parse_init(&parser
, tgsi_tokens
);
1722 assert(ret
== TGSI_PARSE_OK
);
1724 while (!tgsi_parse_end_of_tokens(&parser
)) {
1725 tgsi_parse_token(&parser
);
1726 c
->token
= &parser
.FullToken
;
1728 switch (parser
.FullToken
.Token
.Type
) {
1729 case TGSI_TOKEN_TYPE_DECLARATION
:
1730 ttn_emit_declaration(c
);
1733 case TGSI_TOKEN_TYPE_INSTRUCTION
:
1734 ttn_emit_instruction(c
);
1737 case TGSI_TOKEN_TYPE_IMMEDIATE
:
1738 ttn_emit_immediate(c
);
1743 tgsi_parse_free(&parser
);
1745 ttn_add_output_stores(c
);