2 * Copyright © 2014-2015 Broadcom
3 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "util/ralloc.h"
26 #include "compiler/nir/nir.h"
27 #include "compiler/nir/nir_control_flow.h"
28 #include "compiler/nir/nir_builder.h"
29 #include "compiler/glsl/list.h"
30 #include "compiler/shader_enums.h"
32 #include "tgsi_to_nir.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "tgsi/tgsi_dump.h"
35 #include "tgsi/tgsi_info.h"
36 #include "tgsi/tgsi_scan.h"
37 #include "tgsi/tgsi_from_mesa.h"
39 #define SWIZ(X, Y, Z, W) (unsigned[4]){ \
47 /** nir register containing this TGSI index. */
50 /** Offset (in vec4s) from the start of var for this TGSI index. */
55 union tgsi_full_token
*token
;
57 struct tgsi_shader_info
*scan
;
59 struct ttn_reg_info
*output_regs
;
60 struct ttn_reg_info
*temp_regs
;
61 nir_ssa_def
**imm_defs
;
63 unsigned num_samp_types
;
64 nir_alu_type
*samp_types
;
66 nir_register
*addr_reg
;
68 nir_variable
**inputs
;
69 nir_variable
**outputs
;
72 * Stack of nir_cursors where instructions should be pushed as we pop
73 * back out of the control flow stack.
75 * For each IF/ELSE/ENDIF block, if_stack[if_stack_pos] has where the else
76 * instructions should be placed, and if_stack[if_stack_pos - 1] has where
77 * the next instructions outside of the if/then/else block go.
80 unsigned if_stack_pos
;
83 * Stack of nir_cursors where instructions should be pushed as we pop
84 * back out of the control flow stack.
86 * loop_stack[loop_stack_pos - 1] contains the cf_node_list for the outside
89 nir_cursor
*loop_stack
;
90 unsigned loop_stack_pos
;
92 /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
96 #define ttn_swizzle(b, src, x, y, z, w) \
97 nir_swizzle(b, src, SWIZ(x, y, z, w), 4, false)
98 #define ttn_channel(b, src, swiz) \
99 nir_swizzle(b, src, SWIZ(swiz, swiz, swiz, swiz), 1, false)
101 static gl_varying_slot
102 tgsi_varying_semantic_to_slot(unsigned semantic
, unsigned index
)
105 case TGSI_SEMANTIC_POSITION
:
106 return VARYING_SLOT_POS
;
107 case TGSI_SEMANTIC_COLOR
:
109 return VARYING_SLOT_COL0
;
111 return VARYING_SLOT_COL1
;
112 case TGSI_SEMANTIC_BCOLOR
:
114 return VARYING_SLOT_BFC0
;
116 return VARYING_SLOT_BFC1
;
117 case TGSI_SEMANTIC_FOG
:
118 return VARYING_SLOT_FOGC
;
119 case TGSI_SEMANTIC_PSIZE
:
120 return VARYING_SLOT_PSIZ
;
121 case TGSI_SEMANTIC_GENERIC
:
122 return VARYING_SLOT_VAR0
+ index
;
123 case TGSI_SEMANTIC_FACE
:
124 return VARYING_SLOT_FACE
;
125 case TGSI_SEMANTIC_EDGEFLAG
:
126 return VARYING_SLOT_EDGE
;
127 case TGSI_SEMANTIC_PRIMID
:
128 return VARYING_SLOT_PRIMITIVE_ID
;
129 case TGSI_SEMANTIC_CLIPDIST
:
131 return VARYING_SLOT_CLIP_DIST0
;
133 return VARYING_SLOT_CLIP_DIST1
;
134 case TGSI_SEMANTIC_CLIPVERTEX
:
135 return VARYING_SLOT_CLIP_VERTEX
;
136 case TGSI_SEMANTIC_TEXCOORD
:
137 return VARYING_SLOT_TEX0
+ index
;
138 case TGSI_SEMANTIC_PCOORD
:
139 return VARYING_SLOT_PNTC
;
140 case TGSI_SEMANTIC_VIEWPORT_INDEX
:
141 return VARYING_SLOT_VIEWPORT
;
142 case TGSI_SEMANTIC_LAYER
:
143 return VARYING_SLOT_LAYER
;
145 fprintf(stderr
, "Bad TGSI semantic: %d/%d\n", semantic
, index
);
151 ttn_src_for_dest(nir_builder
*b
, nir_alu_dest
*dest
)
154 memset(&src
, 0, sizeof(src
));
156 if (dest
->dest
.is_ssa
)
157 src
.src
= nir_src_for_ssa(&dest
->dest
.ssa
);
159 assert(!dest
->dest
.reg
.indirect
);
160 src
.src
= nir_src_for_reg(dest
->dest
.reg
.reg
);
161 src
.src
.reg
.base_offset
= dest
->dest
.reg
.base_offset
;
164 for (int i
= 0; i
< 4; i
++)
167 return nir_fmov_alu(b
, src
, 4);
171 ttn_emit_declaration(struct ttn_compile
*c
)
173 nir_builder
*b
= &c
->build
;
174 struct tgsi_full_declaration
*decl
= &c
->token
->FullDeclaration
;
175 unsigned array_size
= decl
->Range
.Last
- decl
->Range
.First
+ 1;
176 unsigned file
= decl
->Declaration
.File
;
179 if (file
== TGSI_FILE_TEMPORARY
) {
180 if (decl
->Declaration
.Array
) {
181 /* for arrays, we create variables instead of registers: */
182 nir_variable
*var
= rzalloc(b
->shader
, nir_variable
);
184 var
->type
= glsl_array_type(glsl_vec4_type(), array_size
);
185 var
->data
.mode
= nir_var_global
;
186 var
->name
= ralloc_asprintf(var
, "arr_%d", decl
->Array
.ArrayID
);
188 exec_list_push_tail(&b
->shader
->globals
, &var
->node
);
190 for (i
= 0; i
< array_size
; i
++) {
191 /* point all the matching slots to the same var,
192 * with appropriate offset set, mostly just so
193 * we know what to do when tgsi does a non-indirect
196 c
->temp_regs
[decl
->Range
.First
+ i
].reg
= NULL
;
197 c
->temp_regs
[decl
->Range
.First
+ i
].var
= var
;
198 c
->temp_regs
[decl
->Range
.First
+ i
].offset
= i
;
201 for (i
= 0; i
< array_size
; i
++) {
202 nir_register
*reg
= nir_local_reg_create(b
->impl
);
203 reg
->num_components
= 4;
204 c
->temp_regs
[decl
->Range
.First
+ i
].reg
= reg
;
205 c
->temp_regs
[decl
->Range
.First
+ i
].var
= NULL
;
206 c
->temp_regs
[decl
->Range
.First
+ i
].offset
= 0;
209 } else if (file
== TGSI_FILE_ADDRESS
) {
210 c
->addr_reg
= nir_local_reg_create(b
->impl
);
211 c
->addr_reg
->num_components
= 4;
212 } else if (file
== TGSI_FILE_SYSTEM_VALUE
) {
213 /* Nothing to record for system values. */
214 } else if (file
== TGSI_FILE_SAMPLER
) {
215 /* Nothing to record for samplers. */
216 } else if (file
== TGSI_FILE_SAMPLER_VIEW
) {
217 struct tgsi_declaration_sampler_view
*sview
= &decl
->SamplerView
;
220 assert((sview
->ReturnTypeX
== sview
->ReturnTypeY
) &&
221 (sview
->ReturnTypeX
== sview
->ReturnTypeZ
) &&
222 (sview
->ReturnTypeX
== sview
->ReturnTypeW
));
224 switch (sview
->ReturnTypeX
) {
225 case TGSI_RETURN_TYPE_SINT
:
228 case TGSI_RETURN_TYPE_UINT
:
229 type
= nir_type_uint
;
231 case TGSI_RETURN_TYPE_FLOAT
:
233 type
= nir_type_float
;
237 for (i
= 0; i
< array_size
; i
++) {
238 c
->samp_types
[decl
->Range
.First
+ i
] = type
;
241 bool is_array
= (array_size
> 1);
243 assert(file
== TGSI_FILE_INPUT
||
244 file
== TGSI_FILE_OUTPUT
||
245 file
== TGSI_FILE_CONSTANT
);
247 /* nothing to do for UBOs: */
248 if ((file
== TGSI_FILE_CONSTANT
) && decl
->Declaration
.Dimension
&&
249 decl
->Dim
.Index2D
!= 0) {
250 b
->shader
->info
.num_ubos
=
251 MAX2(b
->shader
->info
.num_ubos
, decl
->Dim
.Index2D
);
255 if ((file
== TGSI_FILE_INPUT
) || (file
== TGSI_FILE_OUTPUT
)) {
256 is_array
= (is_array
&& decl
->Declaration
.Array
&&
257 (decl
->Array
.ArrayID
!= 0));
260 for (i
= 0; i
< array_size
; i
++) {
261 unsigned idx
= decl
->Range
.First
+ i
;
262 nir_variable
*var
= rzalloc(b
->shader
, nir_variable
);
264 var
->data
.driver_location
= idx
;
266 var
->type
= glsl_vec4_type();
268 var
->type
= glsl_array_type(var
->type
, array_size
);
271 case TGSI_FILE_INPUT
:
272 var
->data
.read_only
= true;
273 var
->data
.mode
= nir_var_shader_in
;
274 var
->name
= ralloc_asprintf(var
, "in_%d", idx
);
276 if (c
->scan
->processor
== PIPE_SHADER_FRAGMENT
) {
277 if (decl
->Semantic
.Name
== TGSI_SEMANTIC_FACE
) {
278 var
->data
.location
= SYSTEM_VALUE_FRONT_FACE
;
279 var
->data
.mode
= nir_var_system_value
;
282 tgsi_varying_semantic_to_slot(decl
->Semantic
.Name
,
283 decl
->Semantic
.Index
);
286 assert(!decl
->Declaration
.Semantic
);
287 var
->data
.location
= VERT_ATTRIB_GENERIC0
+ idx
;
291 /* We definitely need to translate the interpolation field, because
292 * nir_print will decode it.
294 switch (decl
->Interp
.Interpolate
) {
295 case TGSI_INTERPOLATE_CONSTANT
:
296 var
->data
.interpolation
= INTERP_MODE_FLAT
;
298 case TGSI_INTERPOLATE_LINEAR
:
299 var
->data
.interpolation
= INTERP_MODE_NOPERSPECTIVE
;
301 case TGSI_INTERPOLATE_PERSPECTIVE
:
302 var
->data
.interpolation
= INTERP_MODE_SMOOTH
;
306 exec_list_push_tail(&b
->shader
->inputs
, &var
->node
);
307 c
->inputs
[idx
] = var
;
309 for (int i
= 0; i
< array_size
; i
++)
310 b
->shader
->info
.inputs_read
|= 1 << (var
->data
.location
+ i
);
313 case TGSI_FILE_OUTPUT
: {
314 int semantic_name
= decl
->Semantic
.Name
;
315 int semantic_index
= decl
->Semantic
.Index
;
316 /* Since we can't load from outputs in the IR, we make temporaries
317 * for the outputs and emit stores to the real outputs at the end of
320 nir_register
*reg
= nir_local_reg_create(b
->impl
);
321 reg
->num_components
= 4;
323 reg
->num_array_elems
= array_size
;
325 var
->data
.mode
= nir_var_shader_out
;
326 var
->name
= ralloc_asprintf(var
, "out_%d", idx
);
329 if (c
->scan
->processor
== PIPE_SHADER_FRAGMENT
) {
330 switch (semantic_name
) {
331 case TGSI_SEMANTIC_COLOR
: {
332 /* TODO tgsi loses some information, so we cannot
333 * actually differentiate here between DSB and MRT
334 * at this point. But so far no drivers using tgsi-
335 * to-nir support dual source blend:
337 bool dual_src_blend
= false;
338 if (dual_src_blend
&& (semantic_index
== 1)) {
339 var
->data
.location
= FRAG_RESULT_DATA0
;
342 if (c
->scan
->properties
[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS
])
343 var
->data
.location
= FRAG_RESULT_COLOR
;
345 var
->data
.location
= FRAG_RESULT_DATA0
+ semantic_index
;
349 case TGSI_SEMANTIC_POSITION
:
350 var
->data
.location
= FRAG_RESULT_DEPTH
;
351 var
->type
= glsl_float_type();
354 fprintf(stderr
, "Bad TGSI semantic: %d/%d\n",
355 decl
->Semantic
.Name
, decl
->Semantic
.Index
);
360 tgsi_varying_semantic_to_slot(semantic_name
, semantic_index
);
365 for (j
= 0; j
< array_size
; j
++) {
366 c
->output_regs
[idx
+ j
].offset
= i
+ j
;
367 c
->output_regs
[idx
+ j
].reg
= reg
;
370 c
->output_regs
[idx
].offset
= i
;
371 c
->output_regs
[idx
].reg
= reg
;
374 exec_list_push_tail(&b
->shader
->outputs
, &var
->node
);
375 c
->outputs
[idx
] = var
;
377 for (int i
= 0; i
< array_size
; i
++)
378 b
->shader
->info
.outputs_written
|= 1 << (var
->data
.location
+ i
);
381 case TGSI_FILE_CONSTANT
:
382 var
->data
.mode
= nir_var_uniform
;
383 var
->name
= ralloc_asprintf(var
, "uniform_%d", idx
);
385 exec_list_push_tail(&b
->shader
->uniforms
, &var
->node
);
388 unreachable("bad declaration file");
400 ttn_emit_immediate(struct ttn_compile
*c
)
402 nir_builder
*b
= &c
->build
;
403 struct tgsi_full_immediate
*tgsi_imm
= &c
->token
->FullImmediate
;
404 nir_load_const_instr
*load_const
;
407 load_const
= nir_load_const_instr_create(b
->shader
, 4, 32);
408 c
->imm_defs
[c
->next_imm
] = &load_const
->def
;
411 for (i
= 0; i
< 4; i
++)
412 load_const
->value
.u32
[i
] = tgsi_imm
->u
[i
].Uint
;
414 nir_builder_instr_insert(b
, &load_const
->instr
);
418 ttn_src_for_indirect(struct ttn_compile
*c
, struct tgsi_ind_register
*indirect
);
420 /* generate either a constant or indirect deref chain for accessing an
423 static nir_deref_instr
*
424 ttn_array_deref(struct ttn_compile
*c
, nir_variable
*var
, unsigned offset
,
425 struct tgsi_ind_register
*indirect
)
427 nir_deref_instr
*deref
= nir_build_deref_var(&c
->build
, var
);
428 nir_ssa_def
*index
= nir_imm_int(&c
->build
, offset
);
430 index
= nir_iadd(&c
->build
, index
, ttn_src_for_indirect(c
, indirect
));
431 return nir_build_deref_array(&c
->build
, deref
, index
);
435 ttn_src_for_file_and_index(struct ttn_compile
*c
, unsigned file
, unsigned index
,
436 struct tgsi_ind_register
*indirect
,
437 struct tgsi_dimension
*dim
,
438 struct tgsi_ind_register
*dimind
)
440 nir_builder
*b
= &c
->build
;
443 memset(&src
, 0, sizeof(src
));
446 case TGSI_FILE_TEMPORARY
:
447 if (c
->temp_regs
[index
].var
) {
448 unsigned offset
= c
->temp_regs
[index
].offset
;
449 nir_variable
*var
= c
->temp_regs
[index
].var
;
450 nir_ssa_def
*load
= nir_load_deref(&c
->build
,
451 ttn_array_deref(c
, var
, offset
, indirect
));
453 src
= nir_src_for_ssa(load
);
456 src
.reg
.reg
= c
->temp_regs
[index
].reg
;
461 case TGSI_FILE_ADDRESS
:
462 src
.reg
.reg
= c
->addr_reg
;
466 case TGSI_FILE_IMMEDIATE
:
467 src
= nir_src_for_ssa(c
->imm_defs
[index
]);
472 case TGSI_FILE_SYSTEM_VALUE
: {
473 nir_intrinsic_instr
*load
;
480 switch (c
->scan
->system_value_semantic_name
[index
]) {
481 case TGSI_SEMANTIC_VERTEXID_NOBASE
:
482 op
= nir_intrinsic_load_vertex_id_zero_base
;
484 case TGSI_SEMANTIC_VERTEXID
:
485 op
= nir_intrinsic_load_vertex_id
;
487 case TGSI_SEMANTIC_BASEVERTEX
:
488 op
= nir_intrinsic_load_base_vertex
;
490 case TGSI_SEMANTIC_INSTANCEID
:
491 op
= nir_intrinsic_load_instance_id
;
494 unreachable("bad system value");
497 load
= nir_intrinsic_instr_create(b
->shader
, op
);
498 load
->num_components
= ncomp
;
500 nir_ssa_dest_init(&load
->instr
, &load
->dest
, ncomp
, 32, NULL
);
501 nir_builder_instr_insert(b
, &load
->instr
);
503 src
= nir_src_for_ssa(&load
->dest
.ssa
);
505 b
->shader
->info
.system_values_read
|=
506 (1 << nir_system_value_from_intrinsic(op
));
511 case TGSI_FILE_INPUT
:
512 /* Special case: Turn the frontface varying into a load of the
513 * frontface intrinsic plus math, and appending the silly floats.
515 if (c
->scan
->processor
== PIPE_SHADER_FRAGMENT
&&
516 c
->scan
->input_semantic_name
[index
] == TGSI_SEMANTIC_FACE
) {
517 nir_ssa_def
*tgsi_frontface
[4] = {
519 nir_load_system_value(&c
->build
,
520 nir_intrinsic_load_front_face
, 0),
521 nir_imm_float(&c
->build
, 1.0),
522 nir_imm_float(&c
->build
, -1.0)),
523 nir_imm_float(&c
->build
, 0.0),
524 nir_imm_float(&c
->build
, 0.0),
525 nir_imm_float(&c
->build
, 1.0),
528 return nir_src_for_ssa(nir_vec(&c
->build
, tgsi_frontface
, 4));
530 /* Indirection on input arrays isn't supported by TTN. */
532 nir_deref_instr
*deref
= nir_build_deref_var(&c
->build
,
534 return nir_src_for_ssa(nir_load_deref(&c
->build
, deref
));
538 case TGSI_FILE_CONSTANT
: {
539 nir_intrinsic_instr
*load
;
543 if (dim
&& (dim
->Index
> 0 || dim
->Indirect
)) {
544 op
= nir_intrinsic_load_ubo
;
546 op
= nir_intrinsic_load_uniform
;
549 load
= nir_intrinsic_instr_create(b
->shader
, op
);
551 load
->num_components
= 4;
552 if (dim
&& (dim
->Index
> 0 || dim
->Indirect
)) {
555 ttn_src_for_file_and_index(c
, dimind
->File
, dimind
->Index
,
558 /* UBOs start at index 1 in TGSI: */
560 nir_src_for_ssa(nir_imm_int(b
, dim
->Index
- 1));
566 if (op
== nir_intrinsic_load_ubo
) {
567 /* UBO loads don't have a base offset. */
568 offset
= nir_imm_int(b
, index
);
570 offset
= nir_iadd(b
, offset
, ttn_src_for_indirect(c
, indirect
));
572 /* UBO offsets are in bytes, but TGSI gives them to us in vec4's */
573 offset
= nir_ishl(b
, offset
, nir_imm_int(b
, 4));
575 nir_intrinsic_set_base(load
, index
);
577 offset
= ttn_src_for_indirect(c
, indirect
);
579 offset
= nir_imm_int(b
, 0);
582 load
->src
[srcn
++] = nir_src_for_ssa(offset
);
584 nir_ssa_dest_init(&load
->instr
, &load
->dest
, 4, 32, NULL
);
585 nir_builder_instr_insert(b
, &load
->instr
);
587 src
= nir_src_for_ssa(&load
->dest
.ssa
);
592 unreachable("bad src file");
600 ttn_src_for_indirect(struct ttn_compile
*c
, struct tgsi_ind_register
*indirect
)
602 nir_builder
*b
= &c
->build
;
604 memset(&src
, 0, sizeof(src
));
605 for (int i
= 0; i
< 4; i
++)
606 src
.swizzle
[i
] = indirect
->Swizzle
;
607 src
.src
= ttn_src_for_file_and_index(c
,
611 return nir_imov_alu(b
, src
, 1);
615 ttn_get_dest(struct ttn_compile
*c
, struct tgsi_full_dst_register
*tgsi_fdst
)
617 struct tgsi_dst_register
*tgsi_dst
= &tgsi_fdst
->Register
;
619 unsigned index
= tgsi_dst
->Index
;
621 memset(&dest
, 0, sizeof(dest
));
623 if (tgsi_dst
->File
== TGSI_FILE_TEMPORARY
) {
624 if (c
->temp_regs
[index
].var
) {
627 /* this works, because TGSI will give us a base offset
628 * (in case of indirect index) that points back into
629 * the array. Access can be direct or indirect, we
630 * don't really care. Just create a one-shot dst reg
631 * that will get store_var'd back into the array var
632 * at the end of ttn_emit_instruction()
634 reg
= nir_local_reg_create(c
->build
.impl
);
635 reg
->num_components
= 4;
636 dest
.dest
.reg
.reg
= reg
;
637 dest
.dest
.reg
.base_offset
= 0;
639 assert(!tgsi_dst
->Indirect
);
640 dest
.dest
.reg
.reg
= c
->temp_regs
[index
].reg
;
641 dest
.dest
.reg
.base_offset
= c
->temp_regs
[index
].offset
;
643 } else if (tgsi_dst
->File
== TGSI_FILE_OUTPUT
) {
644 dest
.dest
.reg
.reg
= c
->output_regs
[index
].reg
;
645 dest
.dest
.reg
.base_offset
= c
->output_regs
[index
].offset
;
646 } else if (tgsi_dst
->File
== TGSI_FILE_ADDRESS
) {
648 dest
.dest
.reg
.reg
= c
->addr_reg
;
651 dest
.write_mask
= tgsi_dst
->WriteMask
;
652 dest
.saturate
= false;
654 if (tgsi_dst
->Indirect
&& (tgsi_dst
->File
!= TGSI_FILE_TEMPORARY
)) {
655 nir_src
*indirect
= ralloc(c
->build
.shader
, nir_src
);
656 *indirect
= nir_src_for_ssa(ttn_src_for_indirect(c
, &tgsi_fdst
->Indirect
));
657 dest
.dest
.reg
.indirect
= indirect
;
663 static nir_variable
*
664 ttn_get_var(struct ttn_compile
*c
, struct tgsi_full_dst_register
*tgsi_fdst
)
666 struct tgsi_dst_register
*tgsi_dst
= &tgsi_fdst
->Register
;
667 unsigned index
= tgsi_dst
->Index
;
669 if (tgsi_dst
->File
== TGSI_FILE_TEMPORARY
) {
670 /* we should not have an indirect when there is no var! */
671 if (!c
->temp_regs
[index
].var
)
672 assert(!tgsi_dst
->Indirect
);
673 return c
->temp_regs
[index
].var
;
680 ttn_get_src(struct ttn_compile
*c
, struct tgsi_full_src_register
*tgsi_fsrc
,
683 nir_builder
*b
= &c
->build
;
684 struct tgsi_src_register
*tgsi_src
= &tgsi_fsrc
->Register
;
685 enum tgsi_opcode opcode
= c
->token
->FullInstruction
.Instruction
.Opcode
;
686 unsigned tgsi_src_type
= tgsi_opcode_infer_src_type(opcode
, src_idx
);
687 bool src_is_float
= !(tgsi_src_type
== TGSI_TYPE_SIGNED
||
688 tgsi_src_type
== TGSI_TYPE_UNSIGNED
);
691 memset(&src
, 0, sizeof(src
));
693 if (tgsi_src
->File
== TGSI_FILE_NULL
) {
694 return nir_imm_float(b
, 0.0);
695 } else if (tgsi_src
->File
== TGSI_FILE_SAMPLER
) {
696 /* Only the index of the sampler gets used in texturing, and it will
697 * handle looking that up on its own instead of using the nir_alu_src.
699 assert(!tgsi_src
->Indirect
);
702 struct tgsi_ind_register
*ind
= NULL
;
703 struct tgsi_dimension
*dim
= NULL
;
704 struct tgsi_ind_register
*dimind
= NULL
;
705 if (tgsi_src
->Indirect
)
706 ind
= &tgsi_fsrc
->Indirect
;
707 if (tgsi_src
->Dimension
) {
708 dim
= &tgsi_fsrc
->Dimension
;
710 dimind
= &tgsi_fsrc
->DimIndirect
;
712 src
.src
= ttn_src_for_file_and_index(c
,
718 src
.swizzle
[0] = tgsi_src
->SwizzleX
;
719 src
.swizzle
[1] = tgsi_src
->SwizzleY
;
720 src
.swizzle
[2] = tgsi_src
->SwizzleZ
;
721 src
.swizzle
[3] = tgsi_src
->SwizzleW
;
723 nir_ssa_def
*def
= nir_fmov_alu(b
, src
, 4);
725 if (tgsi_src
->Absolute
) {
727 def
= nir_fabs(b
, def
);
729 def
= nir_iabs(b
, def
);
732 if (tgsi_src
->Negate
) {
734 def
= nir_fneg(b
, def
);
736 def
= nir_ineg(b
, def
);
743 ttn_alu(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
745 unsigned num_srcs
= nir_op_infos
[op
].num_inputs
;
746 nir_alu_instr
*instr
= nir_alu_instr_create(b
->shader
, op
);
749 for (i
= 0; i
< num_srcs
; i
++)
750 instr
->src
[i
].src
= nir_src_for_ssa(src
[i
]);
753 nir_builder_instr_insert(b
, &instr
->instr
);
757 ttn_move_dest_masked(nir_builder
*b
, nir_alu_dest dest
,
758 nir_ssa_def
*def
, unsigned write_mask
)
760 if (!(dest
.write_mask
& write_mask
))
763 nir_alu_instr
*mov
= nir_alu_instr_create(b
->shader
, nir_op_imov
);
765 mov
->dest
.write_mask
&= write_mask
;
766 mov
->src
[0].src
= nir_src_for_ssa(def
);
767 for (unsigned i
= def
->num_components
; i
< 4; i
++)
768 mov
->src
[0].swizzle
[i
] = def
->num_components
- 1;
769 nir_builder_instr_insert(b
, &mov
->instr
);
773 ttn_move_dest(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
*def
)
775 ttn_move_dest_masked(b
, dest
, def
, TGSI_WRITEMASK_XYZW
);
779 ttn_arl(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
781 ttn_move_dest(b
, dest
, nir_f2i32(b
, nir_ffloor(b
, src
[0])));
784 /* EXP - Approximate Exponential Base 2
785 * dst.x = 2^{\lfloor src.x\rfloor}
786 * dst.y = src.x - \lfloor src.x\rfloor
791 ttn_exp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
793 nir_ssa_def
*srcx
= ttn_channel(b
, src
[0], X
);
795 ttn_move_dest_masked(b
, dest
, nir_fexp2(b
, nir_ffloor(b
, srcx
)),
797 ttn_move_dest_masked(b
, dest
, nir_fsub(b
, srcx
, nir_ffloor(b
, srcx
)),
799 ttn_move_dest_masked(b
, dest
, nir_fexp2(b
, srcx
), TGSI_WRITEMASK_Z
);
800 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
803 /* LOG - Approximate Logarithm Base 2
804 * dst.x = \lfloor\log_2{|src.x|}\rfloor
805 * dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
806 * dst.z = \log_2{|src.x|}
810 ttn_log(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
812 nir_ssa_def
*abs_srcx
= nir_fabs(b
, ttn_channel(b
, src
[0], X
));
813 nir_ssa_def
*log2
= nir_flog2(b
, abs_srcx
);
815 ttn_move_dest_masked(b
, dest
, nir_ffloor(b
, log2
), TGSI_WRITEMASK_X
);
816 ttn_move_dest_masked(b
, dest
,
817 nir_fdiv(b
, abs_srcx
, nir_fexp2(b
, nir_ffloor(b
, log2
))),
819 ttn_move_dest_masked(b
, dest
, nir_flog2(b
, abs_srcx
), TGSI_WRITEMASK_Z
);
820 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
823 /* DST - Distance Vector
825 * dst.y = src0.y \times src1.y
830 ttn_dst(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
832 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_X
);
833 ttn_move_dest_masked(b
, dest
, nir_fmul(b
, src
[0], src
[1]), TGSI_WRITEMASK_Y
);
834 ttn_move_dest_masked(b
, dest
, nir_fmov(b
, src
[0]), TGSI_WRITEMASK_Z
);
835 ttn_move_dest_masked(b
, dest
, nir_fmov(b
, src
[1]), TGSI_WRITEMASK_W
);
838 /* LIT - Light Coefficients
840 * dst.y = max(src.x, 0.0)
841 * dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0))} : 0
845 ttn_lit(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
847 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_XW
);
849 ttn_move_dest_masked(b
, dest
, nir_fmax(b
, ttn_channel(b
, src
[0], X
),
850 nir_imm_float(b
, 0.0)), TGSI_WRITEMASK_Y
);
852 if (dest
.write_mask
& TGSI_WRITEMASK_Z
) {
853 nir_ssa_def
*src0_y
= ttn_channel(b
, src
[0], Y
);
854 nir_ssa_def
*wclamp
= nir_fmax(b
, nir_fmin(b
, ttn_channel(b
, src
[0], W
),
855 nir_imm_float(b
, 128.0)),
856 nir_imm_float(b
, -128.0));
857 nir_ssa_def
*pow
= nir_fpow(b
, nir_fmax(b
, src0_y
, nir_imm_float(b
, 0.0)),
860 ttn_move_dest_masked(b
, dest
,
863 nir_imm_float(b
, 0.0),
864 ttn_channel(b
, src
[0], X
)),
865 nir_imm_float(b
, 0.0),
872 ttn_sle(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
874 ttn_move_dest(b
, dest
, nir_sge(b
, src
[1], src
[0]));
878 ttn_sgt(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
880 ttn_move_dest(b
, dest
, nir_slt(b
, src
[1], src
[0]));
884 ttn_dp2(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
886 ttn_move_dest(b
, dest
, nir_fdot2(b
, src
[0], src
[1]));
890 ttn_dp3(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
892 ttn_move_dest(b
, dest
, nir_fdot3(b
, src
[0], src
[1]));
896 ttn_dp4(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
898 ttn_move_dest(b
, dest
, nir_fdot4(b
, src
[0], src
[1]));
902 ttn_umad(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
904 ttn_move_dest(b
, dest
, nir_iadd(b
, nir_imul(b
, src
[0], src
[1]), src
[2]));
908 ttn_arr(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
910 ttn_move_dest(b
, dest
, nir_ffloor(b
, nir_fadd(b
, src
[0], nir_imm_float(b
, 0.5))));
914 ttn_cmp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
916 ttn_move_dest(b
, dest
, nir_bcsel(b
,
917 nir_flt(b
, src
[0], nir_imm_float(b
, 0.0)),
922 ttn_ucmp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
924 ttn_move_dest(b
, dest
, nir_bcsel(b
,
925 nir_ine(b
, src
[0], nir_imm_int(b
, 0)),
930 ttn_kill(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
932 nir_intrinsic_instr
*discard
=
933 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_discard
);
934 nir_builder_instr_insert(b
, &discard
->instr
);
935 b
->shader
->info
.fs
.uses_discard
= true;
939 ttn_kill_if(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
941 nir_ssa_def
*cmp
= nir_bany(b
, nir_flt(b
, src
[0], nir_imm_float(b
, 0.0)));
942 nir_intrinsic_instr
*discard
=
943 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_discard_if
);
944 discard
->src
[0] = nir_src_for_ssa(cmp
);
945 nir_builder_instr_insert(b
, &discard
->instr
);
946 b
->shader
->info
.fs
.uses_discard
= true;
950 ttn_if(struct ttn_compile
*c
, nir_ssa_def
*src
, bool is_uint
)
952 nir_builder
*b
= &c
->build
;
954 src
= ttn_channel(b
, src
, X
);
956 nir_if
*if_stmt
= nir_if_create(b
->shader
);
958 if_stmt
->condition
= nir_src_for_ssa(nir_ine(b
, src
, nir_imm_int(b
, 0)));
960 if_stmt
->condition
= nir_src_for_ssa(nir_fne(b
, src
, nir_imm_int(b
, 0)));
962 nir_builder_cf_insert(b
, &if_stmt
->cf_node
);
964 c
->if_stack
[c
->if_stack_pos
] = nir_after_cf_node(&if_stmt
->cf_node
);
967 b
->cursor
= nir_after_cf_list(&if_stmt
->then_list
);
969 c
->if_stack
[c
->if_stack_pos
] = nir_after_cf_list(&if_stmt
->else_list
);
974 ttn_else(struct ttn_compile
*c
)
976 nir_builder
*b
= &c
->build
;
978 b
->cursor
= c
->if_stack
[c
->if_stack_pos
- 1];
982 ttn_endif(struct ttn_compile
*c
)
984 nir_builder
*b
= &c
->build
;
986 c
->if_stack_pos
-= 2;
987 b
->cursor
= c
->if_stack
[c
->if_stack_pos
];
991 ttn_bgnloop(struct ttn_compile
*c
)
993 nir_builder
*b
= &c
->build
;
995 nir_loop
*loop
= nir_loop_create(b
->shader
);
996 nir_builder_cf_insert(b
, &loop
->cf_node
);
998 c
->loop_stack
[c
->loop_stack_pos
] = nir_after_cf_node(&loop
->cf_node
);
1001 b
->cursor
= nir_after_cf_list(&loop
->body
);
1005 ttn_cont(nir_builder
*b
)
1007 nir_jump_instr
*instr
= nir_jump_instr_create(b
->shader
, nir_jump_continue
);
1008 nir_builder_instr_insert(b
, &instr
->instr
);
1012 ttn_brk(nir_builder
*b
)
1014 nir_jump_instr
*instr
= nir_jump_instr_create(b
->shader
, nir_jump_break
);
1015 nir_builder_instr_insert(b
, &instr
->instr
);
1019 ttn_endloop(struct ttn_compile
*c
)
1021 nir_builder
*b
= &c
->build
;
1023 c
->loop_stack_pos
--;
1024 b
->cursor
= c
->loop_stack
[c
->loop_stack_pos
];
1028 setup_texture_info(nir_tex_instr
*instr
, unsigned texture
)
1031 case TGSI_TEXTURE_BUFFER
:
1032 instr
->sampler_dim
= GLSL_SAMPLER_DIM_BUF
;
1034 case TGSI_TEXTURE_1D
:
1035 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
1037 case TGSI_TEXTURE_1D_ARRAY
:
1038 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
1039 instr
->is_array
= true;
1041 case TGSI_TEXTURE_SHADOW1D
:
1042 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
1043 instr
->is_shadow
= true;
1045 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1046 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
1047 instr
->is_shadow
= true;
1048 instr
->is_array
= true;
1050 case TGSI_TEXTURE_2D
:
1051 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
1053 case TGSI_TEXTURE_2D_ARRAY
:
1054 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
1055 instr
->is_array
= true;
1057 case TGSI_TEXTURE_2D_MSAA
:
1058 instr
->sampler_dim
= GLSL_SAMPLER_DIM_MS
;
1060 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
1061 instr
->sampler_dim
= GLSL_SAMPLER_DIM_MS
;
1062 instr
->is_array
= true;
1064 case TGSI_TEXTURE_SHADOW2D
:
1065 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
1066 instr
->is_shadow
= true;
1068 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1069 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
1070 instr
->is_shadow
= true;
1071 instr
->is_array
= true;
1073 case TGSI_TEXTURE_3D
:
1074 instr
->sampler_dim
= GLSL_SAMPLER_DIM_3D
;
1076 case TGSI_TEXTURE_CUBE
:
1077 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1079 case TGSI_TEXTURE_CUBE_ARRAY
:
1080 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1081 instr
->is_array
= true;
1083 case TGSI_TEXTURE_SHADOWCUBE
:
1084 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1085 instr
->is_shadow
= true;
1087 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
1088 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1089 instr
->is_shadow
= true;
1090 instr
->is_array
= true;
1092 case TGSI_TEXTURE_RECT
:
1093 instr
->sampler_dim
= GLSL_SAMPLER_DIM_RECT
;
1095 case TGSI_TEXTURE_SHADOWRECT
:
1096 instr
->sampler_dim
= GLSL_SAMPLER_DIM_RECT
;
1097 instr
->is_shadow
= true;
1100 fprintf(stderr
, "Unknown TGSI texture target %d\n", texture
);
1106 ttn_tex(struct ttn_compile
*c
, nir_alu_dest dest
, nir_ssa_def
**src
)
1108 nir_builder
*b
= &c
->build
;
1109 struct tgsi_full_instruction
*tgsi_inst
= &c
->token
->FullInstruction
;
1110 nir_tex_instr
*instr
;
1112 unsigned num_srcs
, samp
= 1, sview
, i
;
1114 switch (tgsi_inst
->Instruction
.Opcode
) {
1115 case TGSI_OPCODE_TEX
:
1119 case TGSI_OPCODE_TEX2
:
1124 case TGSI_OPCODE_TXP
:
1128 case TGSI_OPCODE_TXB
:
1132 case TGSI_OPCODE_TXB2
:
1137 case TGSI_OPCODE_TXL
:
1141 case TGSI_OPCODE_TXL2
:
1146 case TGSI_OPCODE_TXF
:
1147 if (tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_2D_MSAA
||
1148 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_2D_ARRAY_MSAA
) {
1149 op
= nir_texop_txf_ms
;
1155 case TGSI_OPCODE_TXD
:
1160 case TGSI_OPCODE_LODQ
:
1166 fprintf(stderr
, "unknown TGSI tex op %d\n", tgsi_inst
->Instruction
.Opcode
);
1170 if (tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D
||
1171 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D_ARRAY
||
1172 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D
||
1173 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D_ARRAY
||
1174 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWRECT
||
1175 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
1176 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
) {
1180 num_srcs
+= tgsi_inst
->Texture
.NumOffsets
;
1182 instr
= nir_tex_instr_create(b
->shader
, num_srcs
);
1185 setup_texture_info(instr
, tgsi_inst
->Texture
.Texture
);
1187 switch (instr
->sampler_dim
) {
1188 case GLSL_SAMPLER_DIM_1D
:
1189 case GLSL_SAMPLER_DIM_BUF
:
1190 instr
->coord_components
= 1;
1192 case GLSL_SAMPLER_DIM_2D
:
1193 case GLSL_SAMPLER_DIM_RECT
:
1194 case GLSL_SAMPLER_DIM_EXTERNAL
:
1195 case GLSL_SAMPLER_DIM_MS
:
1196 instr
->coord_components
= 2;
1198 case GLSL_SAMPLER_DIM_3D
:
1199 case GLSL_SAMPLER_DIM_CUBE
:
1200 instr
->coord_components
= 3;
1202 case GLSL_SAMPLER_DIM_SUBPASS
:
1203 case GLSL_SAMPLER_DIM_SUBPASS_MS
:
1204 unreachable("invalid sampler_dim");
1207 if (instr
->is_array
)
1208 instr
->coord_components
++;
1210 assert(tgsi_inst
->Src
[samp
].Register
.File
== TGSI_FILE_SAMPLER
);
1211 instr
->texture_index
= tgsi_inst
->Src
[samp
].Register
.Index
;
1212 instr
->sampler_index
= tgsi_inst
->Src
[samp
].Register
.Index
;
1214 /* TODO if we supported any opc's which take an explicit SVIEW
1215 * src, we would use that here instead. But for the "legacy"
1216 * texture opc's the SVIEW index is same as SAMP index:
1218 sview
= instr
->texture_index
;
1220 if (op
== nir_texop_lod
) {
1221 instr
->dest_type
= nir_type_float
;
1222 } else if (sview
< c
->num_samp_types
) {
1223 instr
->dest_type
= c
->samp_types
[sview
];
1225 instr
->dest_type
= nir_type_float
;
1228 unsigned src_number
= 0;
1230 instr
->src
[src_number
].src
=
1231 nir_src_for_ssa(nir_swizzle(b
, src
[0], SWIZ(X
, Y
, Z
, W
),
1232 instr
->coord_components
, false));
1233 instr
->src
[src_number
].src_type
= nir_tex_src_coord
;
1236 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXP
) {
1237 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1238 instr
->src
[src_number
].src_type
= nir_tex_src_projector
;
1242 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB
) {
1243 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1244 instr
->src
[src_number
].src_type
= nir_tex_src_bias
;
1248 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
) {
1249 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[1], X
));
1250 instr
->src
[src_number
].src_type
= nir_tex_src_bias
;
1254 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
) {
1255 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1256 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
1260 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL2
) {
1261 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[1], X
));
1262 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
1266 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXF
) {
1267 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1268 if (op
== nir_texop_txf_ms
)
1269 instr
->src
[src_number
].src_type
= nir_tex_src_ms_index
;
1271 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
1275 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXD
) {
1276 instr
->src
[src_number
].src_type
= nir_tex_src_ddx
;
1277 instr
->src
[src_number
].src
=
1278 nir_src_for_ssa(nir_swizzle(b
, src
[1], SWIZ(X
, Y
, Z
, W
),
1279 nir_tex_instr_src_size(instr
, src_number
),
1282 instr
->src
[src_number
].src_type
= nir_tex_src_ddy
;
1283 instr
->src
[src_number
].src
=
1284 nir_src_for_ssa(nir_swizzle(b
, src
[2], SWIZ(X
, Y
, Z
, W
),
1285 nir_tex_instr_src_size(instr
, src_number
),
1290 if (instr
->is_shadow
) {
1291 if (instr
->coord_components
== 4)
1292 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[1], X
));
1293 else if (instr
->coord_components
== 3)
1294 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1296 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], Z
));
1298 instr
->src
[src_number
].src_type
= nir_tex_src_comparator
;
1302 for (i
= 0; i
< tgsi_inst
->Texture
.NumOffsets
; i
++) {
1303 struct tgsi_texture_offset
*tex_offset
= &tgsi_inst
->TexOffsets
[i
];
1304 /* since TexOffset isn't using tgsi_full_src_register we get to
1305 * do some extra gymnastics:
1309 memset(&src
, 0, sizeof(src
));
1311 src
.src
= ttn_src_for_file_and_index(c
,
1316 src
.swizzle
[0] = tex_offset
->SwizzleX
;
1317 src
.swizzle
[1] = tex_offset
->SwizzleY
;
1318 src
.swizzle
[2] = tex_offset
->SwizzleZ
;
1319 src
.swizzle
[3] = TGSI_SWIZZLE_W
;
1321 instr
->src
[src_number
].src_type
= nir_tex_src_offset
;
1322 instr
->src
[src_number
].src
= nir_src_for_ssa(
1323 nir_fmov_alu(b
, src
, nir_tex_instr_src_size(instr
, src_number
)));
1327 assert(src_number
== num_srcs
);
1329 nir_ssa_dest_init(&instr
->instr
, &instr
->dest
,
1330 nir_tex_instr_dest_size(instr
),
1332 nir_builder_instr_insert(b
, &instr
->instr
);
1334 /* Resolve the writemask on the texture op. */
1335 ttn_move_dest(b
, dest
, &instr
->dest
.ssa
);
1338 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1340 * dst.x = texture\_width(unit, lod)
1341 * dst.y = texture\_height(unit, lod)
1342 * dst.z = texture\_depth(unit, lod)
1343 * dst.w = texture\_levels(unit)
1345 * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
1348 ttn_txq(struct ttn_compile
*c
, nir_alu_dest dest
, nir_ssa_def
**src
)
1350 nir_builder
*b
= &c
->build
;
1351 struct tgsi_full_instruction
*tgsi_inst
= &c
->token
->FullInstruction
;
1352 nir_tex_instr
*txs
, *qlv
;
1354 txs
= nir_tex_instr_create(b
->shader
, 1);
1355 txs
->op
= nir_texop_txs
;
1356 setup_texture_info(txs
, tgsi_inst
->Texture
.Texture
);
1358 qlv
= nir_tex_instr_create(b
->shader
, 0);
1359 qlv
->op
= nir_texop_query_levels
;
1360 setup_texture_info(qlv
, tgsi_inst
->Texture
.Texture
);
1362 assert(tgsi_inst
->Src
[1].Register
.File
== TGSI_FILE_SAMPLER
);
1363 txs
->texture_index
= tgsi_inst
->Src
[1].Register
.Index
;
1364 qlv
->texture_index
= tgsi_inst
->Src
[1].Register
.Index
;
1366 /* only single src, the lod: */
1367 txs
->src
[0].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], X
));
1368 txs
->src
[0].src_type
= nir_tex_src_lod
;
1370 nir_ssa_dest_init(&txs
->instr
, &txs
->dest
,
1371 nir_tex_instr_dest_size(txs
), 32, NULL
);
1372 nir_builder_instr_insert(b
, &txs
->instr
);
1374 nir_ssa_dest_init(&qlv
->instr
, &qlv
->dest
, 1, 32, NULL
);
1375 nir_builder_instr_insert(b
, &qlv
->instr
);
1377 ttn_move_dest_masked(b
, dest
, &txs
->dest
.ssa
, TGSI_WRITEMASK_XYZ
);
1378 ttn_move_dest_masked(b
, dest
, &qlv
->dest
.ssa
, TGSI_WRITEMASK_W
);
1381 static const nir_op op_trans
[TGSI_OPCODE_LAST
] = {
1382 [TGSI_OPCODE_ARL
] = 0,
1383 [TGSI_OPCODE_MOV
] = nir_op_fmov
,
1384 [TGSI_OPCODE_LIT
] = 0,
1385 [TGSI_OPCODE_RCP
] = nir_op_frcp
,
1386 [TGSI_OPCODE_RSQ
] = nir_op_frsq
,
1387 [TGSI_OPCODE_EXP
] = 0,
1388 [TGSI_OPCODE_LOG
] = 0,
1389 [TGSI_OPCODE_MUL
] = nir_op_fmul
,
1390 [TGSI_OPCODE_ADD
] = nir_op_fadd
,
1391 [TGSI_OPCODE_DP3
] = 0,
1392 [TGSI_OPCODE_DP4
] = 0,
1393 [TGSI_OPCODE_DST
] = 0,
1394 [TGSI_OPCODE_MIN
] = nir_op_fmin
,
1395 [TGSI_OPCODE_MAX
] = nir_op_fmax
,
1396 [TGSI_OPCODE_SLT
] = nir_op_slt
,
1397 [TGSI_OPCODE_SGE
] = nir_op_sge
,
1398 [TGSI_OPCODE_MAD
] = nir_op_ffma
,
1399 [TGSI_OPCODE_LRP
] = 0,
1400 [TGSI_OPCODE_SQRT
] = nir_op_fsqrt
,
1401 [TGSI_OPCODE_FRC
] = nir_op_ffract
,
1402 [TGSI_OPCODE_FLR
] = nir_op_ffloor
,
1403 [TGSI_OPCODE_ROUND
] = nir_op_fround_even
,
1404 [TGSI_OPCODE_EX2
] = nir_op_fexp2
,
1405 [TGSI_OPCODE_LG2
] = nir_op_flog2
,
1406 [TGSI_OPCODE_POW
] = nir_op_fpow
,
1407 [TGSI_OPCODE_COS
] = nir_op_fcos
,
1408 [TGSI_OPCODE_DDX
] = nir_op_fddx
,
1409 [TGSI_OPCODE_DDY
] = nir_op_fddy
,
1410 [TGSI_OPCODE_KILL
] = 0,
1411 [TGSI_OPCODE_PK2H
] = 0, /* XXX */
1412 [TGSI_OPCODE_PK2US
] = 0, /* XXX */
1413 [TGSI_OPCODE_PK4B
] = 0, /* XXX */
1414 [TGSI_OPCODE_PK4UB
] = 0, /* XXX */
1415 [TGSI_OPCODE_SEQ
] = nir_op_seq
,
1416 [TGSI_OPCODE_SGT
] = 0,
1417 [TGSI_OPCODE_SIN
] = nir_op_fsin
,
1418 [TGSI_OPCODE_SNE
] = nir_op_sne
,
1419 [TGSI_OPCODE_SLE
] = 0,
1420 [TGSI_OPCODE_TEX
] = 0,
1421 [TGSI_OPCODE_TXD
] = 0,
1422 [TGSI_OPCODE_TXP
] = 0,
1423 [TGSI_OPCODE_UP2H
] = 0, /* XXX */
1424 [TGSI_OPCODE_UP2US
] = 0, /* XXX */
1425 [TGSI_OPCODE_UP4B
] = 0, /* XXX */
1426 [TGSI_OPCODE_UP4UB
] = 0, /* XXX */
1427 [TGSI_OPCODE_ARR
] = 0,
1429 /* No function calls, yet. */
1430 [TGSI_OPCODE_CAL
] = 0, /* XXX */
1431 [TGSI_OPCODE_RET
] = 0, /* XXX */
1433 [TGSI_OPCODE_SSG
] = nir_op_fsign
,
1434 [TGSI_OPCODE_CMP
] = 0,
1435 [TGSI_OPCODE_TXB
] = 0,
1436 [TGSI_OPCODE_DIV
] = nir_op_fdiv
,
1437 [TGSI_OPCODE_DP2
] = 0,
1438 [TGSI_OPCODE_TXL
] = 0,
1440 [TGSI_OPCODE_BRK
] = 0,
1441 [TGSI_OPCODE_IF
] = 0,
1442 [TGSI_OPCODE_UIF
] = 0,
1443 [TGSI_OPCODE_ELSE
] = 0,
1444 [TGSI_OPCODE_ENDIF
] = 0,
1446 [TGSI_OPCODE_DDX_FINE
] = nir_op_fddx_fine
,
1447 [TGSI_OPCODE_DDY_FINE
] = nir_op_fddy_fine
,
1449 [TGSI_OPCODE_CEIL
] = nir_op_fceil
,
1450 [TGSI_OPCODE_I2F
] = nir_op_i2f32
,
1451 [TGSI_OPCODE_NOT
] = nir_op_inot
,
1452 [TGSI_OPCODE_TRUNC
] = nir_op_ftrunc
,
1453 [TGSI_OPCODE_SHL
] = nir_op_ishl
,
1454 [TGSI_OPCODE_AND
] = nir_op_iand
,
1455 [TGSI_OPCODE_OR
] = nir_op_ior
,
1456 [TGSI_OPCODE_MOD
] = nir_op_umod
,
1457 [TGSI_OPCODE_XOR
] = nir_op_ixor
,
1458 [TGSI_OPCODE_TXF
] = 0,
1459 [TGSI_OPCODE_TXQ
] = 0,
1461 [TGSI_OPCODE_CONT
] = 0,
1463 [TGSI_OPCODE_EMIT
] = 0, /* XXX */
1464 [TGSI_OPCODE_ENDPRIM
] = 0, /* XXX */
1466 [TGSI_OPCODE_BGNLOOP
] = 0,
1467 [TGSI_OPCODE_BGNSUB
] = 0, /* XXX: no function calls */
1468 [TGSI_OPCODE_ENDLOOP
] = 0,
1469 [TGSI_OPCODE_ENDSUB
] = 0, /* XXX: no function calls */
1471 [TGSI_OPCODE_NOP
] = 0,
1472 [TGSI_OPCODE_FSEQ
] = nir_op_feq32
,
1473 [TGSI_OPCODE_FSGE
] = nir_op_fge32
,
1474 [TGSI_OPCODE_FSLT
] = nir_op_flt32
,
1475 [TGSI_OPCODE_FSNE
] = nir_op_fne32
,
1477 [TGSI_OPCODE_KILL_IF
] = 0,
1479 [TGSI_OPCODE_END
] = 0,
1481 [TGSI_OPCODE_F2I
] = nir_op_f2i32
,
1482 [TGSI_OPCODE_IDIV
] = nir_op_idiv
,
1483 [TGSI_OPCODE_IMAX
] = nir_op_imax
,
1484 [TGSI_OPCODE_IMIN
] = nir_op_imin
,
1485 [TGSI_OPCODE_INEG
] = nir_op_ineg
,
1486 [TGSI_OPCODE_ISGE
] = nir_op_ige32
,
1487 [TGSI_OPCODE_ISHR
] = nir_op_ishr
,
1488 [TGSI_OPCODE_ISLT
] = nir_op_ilt32
,
1489 [TGSI_OPCODE_F2U
] = nir_op_f2u32
,
1490 [TGSI_OPCODE_U2F
] = nir_op_u2f32
,
1491 [TGSI_OPCODE_UADD
] = nir_op_iadd
,
1492 [TGSI_OPCODE_UDIV
] = nir_op_udiv
,
1493 [TGSI_OPCODE_UMAD
] = 0,
1494 [TGSI_OPCODE_UMAX
] = nir_op_umax
,
1495 [TGSI_OPCODE_UMIN
] = nir_op_umin
,
1496 [TGSI_OPCODE_UMOD
] = nir_op_umod
,
1497 [TGSI_OPCODE_UMUL
] = nir_op_imul
,
1498 [TGSI_OPCODE_USEQ
] = nir_op_ieq32
,
1499 [TGSI_OPCODE_USGE
] = nir_op_uge32
,
1500 [TGSI_OPCODE_USHR
] = nir_op_ushr
,
1501 [TGSI_OPCODE_USLT
] = nir_op_ult32
,
1502 [TGSI_OPCODE_USNE
] = nir_op_ine32
,
1504 [TGSI_OPCODE_SWITCH
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1505 [TGSI_OPCODE_CASE
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1506 [TGSI_OPCODE_DEFAULT
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1507 [TGSI_OPCODE_ENDSWITCH
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1509 /* XXX: SAMPLE opcodes */
1511 [TGSI_OPCODE_UARL
] = nir_op_imov
,
1512 [TGSI_OPCODE_UCMP
] = 0,
1513 [TGSI_OPCODE_IABS
] = nir_op_iabs
,
1514 [TGSI_OPCODE_ISSG
] = nir_op_isign
,
1518 [TGSI_OPCODE_TEX2
] = 0,
1519 [TGSI_OPCODE_TXB2
] = 0,
1520 [TGSI_OPCODE_TXL2
] = 0,
1522 [TGSI_OPCODE_IMUL_HI
] = nir_op_imul_high
,
1523 [TGSI_OPCODE_UMUL_HI
] = nir_op_umul_high
,
1525 [TGSI_OPCODE_TG4
] = 0,
1526 [TGSI_OPCODE_LODQ
] = 0,
1528 [TGSI_OPCODE_IBFE
] = nir_op_ibitfield_extract
,
1529 [TGSI_OPCODE_UBFE
] = nir_op_ubitfield_extract
,
1530 [TGSI_OPCODE_BFI
] = nir_op_bitfield_insert
,
1531 [TGSI_OPCODE_BREV
] = nir_op_bitfield_reverse
,
1532 [TGSI_OPCODE_POPC
] = nir_op_bit_count
,
1533 [TGSI_OPCODE_LSB
] = nir_op_find_lsb
,
1534 [TGSI_OPCODE_IMSB
] = nir_op_ifind_msb
,
1535 [TGSI_OPCODE_UMSB
] = nir_op_ufind_msb
,
1537 [TGSI_OPCODE_INTERP_CENTROID
] = 0, /* XXX */
1538 [TGSI_OPCODE_INTERP_SAMPLE
] = 0, /* XXX */
1539 [TGSI_OPCODE_INTERP_OFFSET
] = 0, /* XXX */
1543 ttn_emit_instruction(struct ttn_compile
*c
)
1545 nir_builder
*b
= &c
->build
;
1546 struct tgsi_full_instruction
*tgsi_inst
= &c
->token
->FullInstruction
;
1548 unsigned tgsi_op
= tgsi_inst
->Instruction
.Opcode
;
1549 struct tgsi_full_dst_register
*tgsi_dst
= &tgsi_inst
->Dst
[0];
1551 if (tgsi_op
== TGSI_OPCODE_END
)
1554 nir_ssa_def
*src
[TGSI_FULL_MAX_SRC_REGISTERS
];
1555 for (i
= 0; i
< tgsi_inst
->Instruction
.NumSrcRegs
; i
++) {
1556 src
[i
] = ttn_get_src(c
, &tgsi_inst
->Src
[i
], i
);
1558 nir_alu_dest dest
= ttn_get_dest(c
, tgsi_dst
);
1561 case TGSI_OPCODE_RSQ
:
1562 ttn_move_dest(b
, dest
, nir_frsq(b
, ttn_channel(b
, src
[0], X
)));
1565 case TGSI_OPCODE_SQRT
:
1566 ttn_move_dest(b
, dest
, nir_fsqrt(b
, ttn_channel(b
, src
[0], X
)));
1569 case TGSI_OPCODE_RCP
:
1570 ttn_move_dest(b
, dest
, nir_frcp(b
, ttn_channel(b
, src
[0], X
)));
1573 case TGSI_OPCODE_EX2
:
1574 ttn_move_dest(b
, dest
, nir_fexp2(b
, ttn_channel(b
, src
[0], X
)));
1577 case TGSI_OPCODE_LG2
:
1578 ttn_move_dest(b
, dest
, nir_flog2(b
, ttn_channel(b
, src
[0], X
)));
1581 case TGSI_OPCODE_POW
:
1582 ttn_move_dest(b
, dest
, nir_fpow(b
,
1583 ttn_channel(b
, src
[0], X
),
1584 ttn_channel(b
, src
[1], X
)));
1587 case TGSI_OPCODE_COS
:
1588 ttn_move_dest(b
, dest
, nir_fcos(b
, ttn_channel(b
, src
[0], X
)));
1591 case TGSI_OPCODE_SIN
:
1592 ttn_move_dest(b
, dest
, nir_fsin(b
, ttn_channel(b
, src
[0], X
)));
1595 case TGSI_OPCODE_ARL
:
1596 ttn_arl(b
, op_trans
[tgsi_op
], dest
, src
);
1599 case TGSI_OPCODE_EXP
:
1600 ttn_exp(b
, op_trans
[tgsi_op
], dest
, src
);
1603 case TGSI_OPCODE_LOG
:
1604 ttn_log(b
, op_trans
[tgsi_op
], dest
, src
);
1607 case TGSI_OPCODE_DST
:
1608 ttn_dst(b
, op_trans
[tgsi_op
], dest
, src
);
1611 case TGSI_OPCODE_LIT
:
1612 ttn_lit(b
, op_trans
[tgsi_op
], dest
, src
);
1615 case TGSI_OPCODE_DP2
:
1616 ttn_dp2(b
, op_trans
[tgsi_op
], dest
, src
);
1619 case TGSI_OPCODE_DP3
:
1620 ttn_dp3(b
, op_trans
[tgsi_op
], dest
, src
);
1623 case TGSI_OPCODE_DP4
:
1624 ttn_dp4(b
, op_trans
[tgsi_op
], dest
, src
);
1627 case TGSI_OPCODE_UMAD
:
1628 ttn_umad(b
, op_trans
[tgsi_op
], dest
, src
);
1631 case TGSI_OPCODE_LRP
:
1632 ttn_move_dest(b
, dest
, nir_flrp(b
, src
[2], src
[1], src
[0]));
1635 case TGSI_OPCODE_KILL
:
1636 ttn_kill(b
, op_trans
[tgsi_op
], dest
, src
);
1639 case TGSI_OPCODE_ARR
:
1640 ttn_arr(b
, op_trans
[tgsi_op
], dest
, src
);
1643 case TGSI_OPCODE_CMP
:
1644 ttn_cmp(b
, op_trans
[tgsi_op
], dest
, src
);
1647 case TGSI_OPCODE_UCMP
:
1648 ttn_ucmp(b
, op_trans
[tgsi_op
], dest
, src
);
1651 case TGSI_OPCODE_SGT
:
1652 ttn_sgt(b
, op_trans
[tgsi_op
], dest
, src
);
1655 case TGSI_OPCODE_SLE
:
1656 ttn_sle(b
, op_trans
[tgsi_op
], dest
, src
);
1659 case TGSI_OPCODE_KILL_IF
:
1660 ttn_kill_if(b
, op_trans
[tgsi_op
], dest
, src
);
1663 case TGSI_OPCODE_TEX
:
1664 case TGSI_OPCODE_TXP
:
1665 case TGSI_OPCODE_TXL
:
1666 case TGSI_OPCODE_TXB
:
1667 case TGSI_OPCODE_TXD
:
1668 case TGSI_OPCODE_TEX2
:
1669 case TGSI_OPCODE_TXL2
:
1670 case TGSI_OPCODE_TXB2
:
1671 case TGSI_OPCODE_TXF
:
1672 case TGSI_OPCODE_TG4
:
1673 case TGSI_OPCODE_LODQ
:
1674 ttn_tex(c
, dest
, src
);
1677 case TGSI_OPCODE_TXQ
:
1678 ttn_txq(c
, dest
, src
);
1681 case TGSI_OPCODE_NOP
:
1684 case TGSI_OPCODE_IF
:
1685 ttn_if(c
, src
[0], false);
1688 case TGSI_OPCODE_UIF
:
1689 ttn_if(c
, src
[0], true);
1692 case TGSI_OPCODE_ELSE
:
1696 case TGSI_OPCODE_ENDIF
:
1700 case TGSI_OPCODE_BGNLOOP
:
1704 case TGSI_OPCODE_BRK
:
1708 case TGSI_OPCODE_CONT
:
1712 case TGSI_OPCODE_ENDLOOP
:
1717 if (op_trans
[tgsi_op
] != 0 || tgsi_op
== TGSI_OPCODE_MOV
) {
1718 ttn_alu(b
, op_trans
[tgsi_op
], dest
, src
);
1720 fprintf(stderr
, "unknown TGSI opcode: %s\n",
1721 tgsi_get_opcode_name(tgsi_op
));
1727 if (tgsi_inst
->Instruction
.Saturate
) {
1728 assert(!dest
.dest
.is_ssa
);
1729 ttn_move_dest(b
, dest
, nir_fsat(b
, ttn_src_for_dest(b
, &dest
)));
1732 /* if the dst has a matching var, append store_var to move
1733 * output from reg to var
1735 nir_variable
*var
= ttn_get_var(c
, tgsi_dst
);
1737 unsigned index
= tgsi_dst
->Register
.Index
;
1738 unsigned offset
= c
->temp_regs
[index
].offset
;
1739 struct tgsi_ind_register
*indirect
= tgsi_dst
->Register
.Indirect
?
1740 &tgsi_dst
->Indirect
: NULL
;
1741 nir_src val
= nir_src_for_reg(dest
.dest
.reg
.reg
);
1742 nir_store_deref(b
, ttn_array_deref(c
, var
, offset
, indirect
),
1743 nir_ssa_for_src(b
, val
, 4), dest
.write_mask
);
1748 * Puts a NIR intrinsic to store of each TGSI_FILE_OUTPUT value to the output
1749 * variables at the end of the shader.
1751 * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
1752 * written, because there's no output load intrinsic, which means we couldn't
1753 * handle writemasks.
1756 ttn_add_output_stores(struct ttn_compile
*c
)
1758 nir_builder
*b
= &c
->build
;
1760 for (int i
= 0; i
< c
->build
.shader
->num_outputs
; i
++) {
1761 nir_variable
*var
= c
->outputs
[i
];
1765 nir_src src
= nir_src_for_reg(c
->output_regs
[i
].reg
);
1766 src
.reg
.base_offset
= c
->output_regs
[i
].offset
;
1768 nir_ssa_def
*store_value
= nir_ssa_for_src(b
, src
, 4);
1769 if (c
->build
.shader
->info
.stage
== MESA_SHADER_FRAGMENT
&&
1770 var
->data
.location
== FRAG_RESULT_DEPTH
) {
1771 /* TGSI uses TGSI_SEMANTIC_POSITION.z for the depth output, while
1772 * NIR uses a single float FRAG_RESULT_DEPTH.
1774 store_value
= nir_channel(b
, store_value
, 2);
1777 nir_store_deref(b
, nir_build_deref_var(b
, var
), store_value
,
1778 (1 << store_value
->num_components
) - 1);
1783 tgsi_to_nir(const void *tgsi_tokens
,
1784 const nir_shader_compiler_options
*options
)
1786 struct tgsi_parse_context parser
;
1787 struct tgsi_shader_info scan
;
1788 struct ttn_compile
*c
;
1789 struct nir_shader
*s
;
1792 c
= rzalloc(NULL
, struct ttn_compile
);
1794 tgsi_scan_shader(tgsi_tokens
, &scan
);
1797 nir_builder_init_simple_shader(&c
->build
, NULL
,
1798 tgsi_processor_to_shader_stage(scan
.processor
),
1800 s
= c
->build
.shader
;
1802 s
->num_inputs
= scan
.file_max
[TGSI_FILE_INPUT
] + 1;
1803 s
->num_uniforms
= scan
.const_file_max
[0] + 1;
1804 s
->num_outputs
= scan
.file_max
[TGSI_FILE_OUTPUT
] + 1;
1806 c
->inputs
= rzalloc_array(c
, struct nir_variable
*, s
->num_inputs
);
1807 c
->outputs
= rzalloc_array(c
, struct nir_variable
*, s
->num_outputs
);
1809 c
->output_regs
= rzalloc_array(c
, struct ttn_reg_info
,
1810 scan
.file_max
[TGSI_FILE_OUTPUT
] + 1);
1811 c
->temp_regs
= rzalloc_array(c
, struct ttn_reg_info
,
1812 scan
.file_max
[TGSI_FILE_TEMPORARY
] + 1);
1813 c
->imm_defs
= rzalloc_array(c
, nir_ssa_def
*,
1814 scan
.file_max
[TGSI_FILE_IMMEDIATE
] + 1);
1816 c
->num_samp_types
= scan
.file_max
[TGSI_FILE_SAMPLER_VIEW
] + 1;
1817 c
->samp_types
= rzalloc_array(c
, nir_alu_type
, c
->num_samp_types
);
1819 c
->if_stack
= rzalloc_array(c
, nir_cursor
,
1820 (scan
.opcode_count
[TGSI_OPCODE_IF
] +
1821 scan
.opcode_count
[TGSI_OPCODE_UIF
]) * 2);
1822 c
->loop_stack
= rzalloc_array(c
, nir_cursor
,
1823 scan
.opcode_count
[TGSI_OPCODE_BGNLOOP
]);
1825 ret
= tgsi_parse_init(&parser
, tgsi_tokens
);
1826 assert(ret
== TGSI_PARSE_OK
);
1828 while (!tgsi_parse_end_of_tokens(&parser
)) {
1829 tgsi_parse_token(&parser
);
1830 c
->token
= &parser
.FullToken
;
1832 switch (parser
.FullToken
.Token
.Type
) {
1833 case TGSI_TOKEN_TYPE_DECLARATION
:
1834 ttn_emit_declaration(c
);
1837 case TGSI_TOKEN_TYPE_INSTRUCTION
:
1838 ttn_emit_instruction(c
);
1841 case TGSI_TOKEN_TYPE_IMMEDIATE
:
1842 ttn_emit_immediate(c
);
1847 tgsi_parse_free(&parser
);
1849 ttn_add_output_stores(c
);