2 * Copyright © 2014-2015 Broadcom
3 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
25 #include "util/ralloc.h"
26 #include "glsl/nir/nir.h"
27 #include "glsl/nir/nir_control_flow.h"
28 #include "glsl/nir/nir_builder.h"
29 #include "glsl/list.h"
30 #include "glsl/nir/shader_enums.h"
32 #include "nir/tgsi_to_nir.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "tgsi/tgsi_dump.h"
35 #include "tgsi/tgsi_info.h"
36 #include "tgsi/tgsi_scan.h"
38 #define SWIZ(X, Y, Z, W) (unsigned[4]){ \
46 /** nir register containing this TGSI index. */
49 /** Offset (in vec4s) from the start of var for this TGSI index. */
54 union tgsi_full_token
*token
;
56 struct tgsi_shader_info
*scan
;
58 struct ttn_reg_info
*output_regs
;
59 struct ttn_reg_info
*temp_regs
;
60 nir_ssa_def
**imm_defs
;
62 unsigned num_samp_types
;
63 nir_alu_type
*samp_types
;
65 nir_register
*addr_reg
;
68 * Stack of nir_cursors where instructions should be pushed as we pop
69 * back out of the control flow stack.
71 * For each IF/ELSE/ENDIF block, if_stack[if_stack_pos] has where the else
72 * instructions should be placed, and if_stack[if_stack_pos - 1] has where
73 * the next instructions outside of the if/then/else block go.
76 unsigned if_stack_pos
;
79 * Stack of nir_cursors where instructions should be pushed as we pop
80 * back out of the control flow stack.
82 * loop_stack[loop_stack_pos - 1] contains the cf_node_list for the outside
85 nir_cursor
*loop_stack
;
86 unsigned loop_stack_pos
;
88 /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
92 #define ttn_swizzle(b, src, x, y, z, w) \
93 nir_swizzle(b, src, SWIZ(x, y, z, w), 4, false)
94 #define ttn_channel(b, src, swiz) \
95 nir_swizzle(b, src, SWIZ(swiz, swiz, swiz, swiz), 1, false)
97 static gl_varying_slot
98 tgsi_varying_semantic_to_slot(unsigned semantic
, unsigned index
)
101 case TGSI_SEMANTIC_POSITION
:
102 return VARYING_SLOT_POS
;
103 case TGSI_SEMANTIC_COLOR
:
105 return VARYING_SLOT_COL0
;
107 return VARYING_SLOT_COL1
;
108 case TGSI_SEMANTIC_BCOLOR
:
110 return VARYING_SLOT_BFC0
;
112 return VARYING_SLOT_BFC1
;
113 case TGSI_SEMANTIC_FOG
:
114 return VARYING_SLOT_FOGC
;
115 case TGSI_SEMANTIC_PSIZE
:
116 return VARYING_SLOT_PSIZ
;
117 case TGSI_SEMANTIC_GENERIC
:
118 return VARYING_SLOT_VAR0
+ index
;
119 case TGSI_SEMANTIC_FACE
:
120 return VARYING_SLOT_FACE
;
121 case TGSI_SEMANTIC_EDGEFLAG
:
122 return VARYING_SLOT_EDGE
;
123 case TGSI_SEMANTIC_PRIMID
:
124 return VARYING_SLOT_PRIMITIVE_ID
;
125 case TGSI_SEMANTIC_CLIPDIST
:
127 return VARYING_SLOT_CLIP_DIST0
;
129 return VARYING_SLOT_CLIP_DIST1
;
130 case TGSI_SEMANTIC_CLIPVERTEX
:
131 return VARYING_SLOT_CLIP_VERTEX
;
132 case TGSI_SEMANTIC_TEXCOORD
:
133 return VARYING_SLOT_TEX0
+ index
;
134 case TGSI_SEMANTIC_PCOORD
:
135 return VARYING_SLOT_PNTC
;
136 case TGSI_SEMANTIC_VIEWPORT_INDEX
:
137 return VARYING_SLOT_VIEWPORT
;
138 case TGSI_SEMANTIC_LAYER
:
139 return VARYING_SLOT_LAYER
;
141 fprintf(stderr
, "Bad TGSI semantic: %d/%d\n", semantic
, index
);
146 /* Temporary helper to remap back to TGSI style semantic name/index
147 * values, for use in drivers that haven't been converted to using
151 varying_slot_to_tgsi_semantic(gl_varying_slot slot
,
152 unsigned *semantic_name
, unsigned *semantic_index
)
154 static const unsigned map
[][2] = {
155 [VARYING_SLOT_POS
] = { TGSI_SEMANTIC_POSITION
, 0 },
156 [VARYING_SLOT_COL0
] = { TGSI_SEMANTIC_COLOR
, 0 },
157 [VARYING_SLOT_COL1
] = { TGSI_SEMANTIC_COLOR
, 1 },
158 [VARYING_SLOT_BFC0
] = { TGSI_SEMANTIC_BCOLOR
, 0 },
159 [VARYING_SLOT_BFC1
] = { TGSI_SEMANTIC_BCOLOR
, 1 },
160 [VARYING_SLOT_FOGC
] = { TGSI_SEMANTIC_FOG
, 0 },
161 [VARYING_SLOT_PSIZ
] = { TGSI_SEMANTIC_PSIZE
, 0 },
162 [VARYING_SLOT_FACE
] = { TGSI_SEMANTIC_FACE
, 0 },
163 [VARYING_SLOT_EDGE
] = { TGSI_SEMANTIC_EDGEFLAG
, 0 },
164 [VARYING_SLOT_PRIMITIVE_ID
] = { TGSI_SEMANTIC_PRIMID
, 0 },
165 [VARYING_SLOT_CLIP_DIST0
] = { TGSI_SEMANTIC_CLIPDIST
, 0 },
166 [VARYING_SLOT_CLIP_DIST1
] = { TGSI_SEMANTIC_CLIPDIST
, 1 },
167 [VARYING_SLOT_CLIP_VERTEX
] = { TGSI_SEMANTIC_CLIPVERTEX
, 0 },
168 [VARYING_SLOT_PNTC
] = { TGSI_SEMANTIC_PCOORD
, 0 },
169 [VARYING_SLOT_VIEWPORT
] = { TGSI_SEMANTIC_VIEWPORT_INDEX
, 0 },
170 [VARYING_SLOT_LAYER
] = { TGSI_SEMANTIC_LAYER
, 0 },
173 if (slot
>= VARYING_SLOT_VAR0
) {
174 *semantic_name
= TGSI_SEMANTIC_GENERIC
;
175 *semantic_index
= slot
- VARYING_SLOT_VAR0
;
179 if (slot
>= VARYING_SLOT_TEX0
&& slot
<= VARYING_SLOT_TEX7
) {
180 *semantic_name
= TGSI_SEMANTIC_TEXCOORD
;
181 *semantic_index
= slot
- VARYING_SLOT_TEX0
;
185 if (slot
>= ARRAY_SIZE(map
)) {
186 fprintf(stderr
, "Unknown varying slot %d\n", slot
);
190 *semantic_name
= map
[slot
][0];
191 *semantic_index
= map
[slot
][1];
194 /* Temporary helper to remap back to TGSI style semantic name/index
195 * values, for use in drivers that haven't been converted to using
199 frag_result_to_tgsi_semantic(gl_frag_result slot
,
200 unsigned *semantic_name
, unsigned *semantic_index
)
202 static const unsigned map
[][2] = {
203 [FRAG_RESULT_DEPTH
] = { TGSI_SEMANTIC_POSITION
, 0 },
204 [FRAG_RESULT_COLOR
] = { TGSI_SEMANTIC_COLOR
, -1 },
205 [FRAG_RESULT_DATA0
+ 0] = { TGSI_SEMANTIC_COLOR
, 0 },
206 [FRAG_RESULT_DATA0
+ 1] = { TGSI_SEMANTIC_COLOR
, 1 },
207 [FRAG_RESULT_DATA0
+ 2] = { TGSI_SEMANTIC_COLOR
, 2 },
208 [FRAG_RESULT_DATA0
+ 3] = { TGSI_SEMANTIC_COLOR
, 3 },
209 [FRAG_RESULT_DATA0
+ 4] = { TGSI_SEMANTIC_COLOR
, 4 },
210 [FRAG_RESULT_DATA0
+ 5] = { TGSI_SEMANTIC_COLOR
, 5 },
211 [FRAG_RESULT_DATA0
+ 6] = { TGSI_SEMANTIC_COLOR
, 6 },
212 [FRAG_RESULT_DATA0
+ 7] = { TGSI_SEMANTIC_COLOR
, 7 },
215 *semantic_name
= map
[slot
][0];
216 *semantic_index
= map
[slot
][1];
220 ttn_src_for_dest(nir_builder
*b
, nir_alu_dest
*dest
)
223 memset(&src
, 0, sizeof(src
));
225 if (dest
->dest
.is_ssa
)
226 src
.src
= nir_src_for_ssa(&dest
->dest
.ssa
);
228 assert(!dest
->dest
.reg
.indirect
);
229 src
.src
= nir_src_for_reg(dest
->dest
.reg
.reg
);
230 src
.src
.reg
.base_offset
= dest
->dest
.reg
.base_offset
;
233 for (int i
= 0; i
< 4; i
++)
236 return nir_fmov_alu(b
, src
, 4);
240 ttn_emit_declaration(struct ttn_compile
*c
)
242 nir_builder
*b
= &c
->build
;
243 struct tgsi_full_declaration
*decl
= &c
->token
->FullDeclaration
;
244 unsigned array_size
= decl
->Range
.Last
- decl
->Range
.First
+ 1;
245 unsigned file
= decl
->Declaration
.File
;
248 if (file
== TGSI_FILE_TEMPORARY
) {
249 if (decl
->Declaration
.Array
) {
250 /* for arrays, we create variables instead of registers: */
251 nir_variable
*var
= rzalloc(b
->shader
, nir_variable
);
253 var
->type
= glsl_array_type(glsl_vec4_type(), array_size
);
254 var
->data
.mode
= nir_var_global
;
255 var
->name
= ralloc_asprintf(var
, "arr_%d", decl
->Array
.ArrayID
);
257 exec_list_push_tail(&b
->shader
->globals
, &var
->node
);
259 for (i
= 0; i
< array_size
; i
++) {
260 /* point all the matching slots to the same var,
261 * with appropriate offset set, mostly just so
262 * we know what to do when tgsi does a non-indirect
265 c
->temp_regs
[decl
->Range
.First
+ i
].reg
= NULL
;
266 c
->temp_regs
[decl
->Range
.First
+ i
].var
= var
;
267 c
->temp_regs
[decl
->Range
.First
+ i
].offset
= i
;
270 for (i
= 0; i
< array_size
; i
++) {
271 nir_register
*reg
= nir_local_reg_create(b
->impl
);
272 reg
->num_components
= 4;
273 c
->temp_regs
[decl
->Range
.First
+ i
].reg
= reg
;
274 c
->temp_regs
[decl
->Range
.First
+ i
].var
= NULL
;
275 c
->temp_regs
[decl
->Range
.First
+ i
].offset
= 0;
278 } else if (file
== TGSI_FILE_ADDRESS
) {
279 c
->addr_reg
= nir_local_reg_create(b
->impl
);
280 c
->addr_reg
->num_components
= 4;
281 } else if (file
== TGSI_FILE_SYSTEM_VALUE
) {
282 /* Nothing to record for system values. */
283 } else if (file
== TGSI_FILE_SAMPLER
) {
284 /* Nothing to record for samplers. */
285 } else if (file
== TGSI_FILE_SAMPLER_VIEW
) {
286 struct tgsi_declaration_sampler_view
*sview
= &decl
->SamplerView
;
289 assert((sview
->ReturnTypeX
== sview
->ReturnTypeY
) &&
290 (sview
->ReturnTypeX
== sview
->ReturnTypeZ
) &&
291 (sview
->ReturnTypeX
== sview
->ReturnTypeW
));
293 switch (sview
->ReturnTypeX
) {
294 case TGSI_RETURN_TYPE_SINT
:
297 case TGSI_RETURN_TYPE_UINT
:
298 type
= nir_type_uint
;
300 case TGSI_RETURN_TYPE_FLOAT
:
302 type
= nir_type_float
;
306 for (i
= 0; i
< array_size
; i
++) {
307 c
->samp_types
[decl
->Range
.First
+ i
] = type
;
310 bool is_array
= (array_size
> 1);
312 assert(file
== TGSI_FILE_INPUT
||
313 file
== TGSI_FILE_OUTPUT
||
314 file
== TGSI_FILE_CONSTANT
);
316 /* nothing to do for UBOs: */
317 if ((file
== TGSI_FILE_CONSTANT
) && decl
->Declaration
.Dimension
)
320 if ((file
== TGSI_FILE_INPUT
) || (file
== TGSI_FILE_OUTPUT
)) {
321 is_array
= (is_array
&& decl
->Declaration
.Array
&&
322 (decl
->Array
.ArrayID
!= 0));
325 for (i
= 0; i
< array_size
; i
++) {
326 unsigned idx
= decl
->Range
.First
+ i
;
327 nir_variable
*var
= rzalloc(b
->shader
, nir_variable
);
329 var
->data
.driver_location
= idx
;
331 var
->type
= glsl_vec4_type();
333 var
->type
= glsl_array_type(var
->type
, array_size
);
336 case TGSI_FILE_INPUT
:
337 var
->data
.read_only
= true;
338 var
->data
.mode
= nir_var_shader_in
;
339 var
->name
= ralloc_asprintf(var
, "in_%d", idx
);
341 if (c
->scan
->processor
== TGSI_PROCESSOR_FRAGMENT
) {
343 tgsi_varying_semantic_to_slot(decl
->Semantic
.Name
,
344 decl
->Semantic
.Index
);
346 assert(!decl
->Declaration
.Semantic
);
347 var
->data
.location
= VERT_ATTRIB_GENERIC0
+ idx
;
351 /* We definitely need to translate the interpolation field, because
352 * nir_print will decode it.
354 switch (decl
->Interp
.Interpolate
) {
355 case TGSI_INTERPOLATE_CONSTANT
:
356 var
->data
.interpolation
= INTERP_QUALIFIER_FLAT
;
358 case TGSI_INTERPOLATE_LINEAR
:
359 var
->data
.interpolation
= INTERP_QUALIFIER_NOPERSPECTIVE
;
361 case TGSI_INTERPOLATE_PERSPECTIVE
:
362 var
->data
.interpolation
= INTERP_QUALIFIER_SMOOTH
;
366 exec_list_push_tail(&b
->shader
->inputs
, &var
->node
);
368 case TGSI_FILE_OUTPUT
: {
369 int semantic_name
= decl
->Semantic
.Name
;
370 int semantic_index
= decl
->Semantic
.Index
;
371 /* Since we can't load from outputs in the IR, we make temporaries
372 * for the outputs and emit stores to the real outputs at the end of
375 nir_register
*reg
= nir_local_reg_create(b
->impl
);
376 reg
->num_components
= 4;
378 reg
->num_array_elems
= array_size
;
380 var
->data
.mode
= nir_var_shader_out
;
381 var
->name
= ralloc_asprintf(var
, "out_%d", idx
);
384 if (c
->scan
->processor
== TGSI_PROCESSOR_FRAGMENT
) {
385 switch (semantic_name
) {
386 case TGSI_SEMANTIC_COLOR
: {
387 /* TODO tgsi loses some information, so we cannot
388 * actually differentiate here between DSB and MRT
389 * at this point. But so far no drivers using tgsi-
390 * to-nir support dual source blend:
392 bool dual_src_blend
= false;
393 if (dual_src_blend
&& (semantic_index
== 1)) {
394 var
->data
.location
= FRAG_RESULT_DATA0
;
397 if (c
->scan
->properties
[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS
])
398 var
->data
.location
= FRAG_RESULT_COLOR
;
400 var
->data
.location
= FRAG_RESULT_DATA0
+ semantic_index
;
404 case TGSI_SEMANTIC_POSITION
:
405 var
->data
.location
= FRAG_RESULT_DEPTH
;
408 fprintf(stderr
, "Bad TGSI semantic: %d/%d\n",
409 decl
->Semantic
.Name
, decl
->Semantic
.Index
);
414 tgsi_varying_semantic_to_slot(semantic_name
, semantic_index
);
419 for (j
= 0; j
< array_size
; j
++) {
420 c
->output_regs
[idx
+ j
].offset
= i
+ j
;
421 c
->output_regs
[idx
+ j
].reg
= reg
;
424 c
->output_regs
[idx
].offset
= i
;
425 c
->output_regs
[idx
].reg
= reg
;
428 exec_list_push_tail(&b
->shader
->outputs
, &var
->node
);
431 case TGSI_FILE_CONSTANT
:
432 var
->data
.mode
= nir_var_uniform
;
433 var
->name
= ralloc_asprintf(var
, "uniform_%d", idx
);
435 exec_list_push_tail(&b
->shader
->uniforms
, &var
->node
);
438 unreachable("bad declaration file");
450 ttn_emit_immediate(struct ttn_compile
*c
)
452 nir_builder
*b
= &c
->build
;
453 struct tgsi_full_immediate
*tgsi_imm
= &c
->token
->FullImmediate
;
454 nir_load_const_instr
*load_const
;
457 load_const
= nir_load_const_instr_create(b
->shader
, 4);
458 c
->imm_defs
[c
->next_imm
] = &load_const
->def
;
461 for (i
= 0; i
< 4; i
++)
462 load_const
->value
.u
[i
] = tgsi_imm
->u
[i
].Uint
;
464 nir_builder_instr_insert(b
, &load_const
->instr
);
468 ttn_src_for_indirect(struct ttn_compile
*c
, struct tgsi_ind_register
*indirect
);
470 /* generate either a constant or indirect deref chain for accessing an
473 static nir_deref_var
*
474 ttn_array_deref(struct ttn_compile
*c
, nir_intrinsic_instr
*instr
,
475 nir_variable
*var
, unsigned offset
,
476 struct tgsi_ind_register
*indirect
)
478 nir_deref_var
*deref
= nir_deref_var_create(instr
, var
);
479 nir_deref_array
*arr
= nir_deref_array_create(deref
);
481 arr
->base_offset
= offset
;
482 arr
->deref
.type
= glsl_get_array_element(var
->type
);
485 arr
->deref_array_type
= nir_deref_array_type_indirect
;
486 arr
->indirect
= nir_src_for_ssa(ttn_src_for_indirect(c
, indirect
));
488 arr
->deref_array_type
= nir_deref_array_type_direct
;
491 deref
->deref
.child
= &arr
->deref
;
497 ttn_src_for_file_and_index(struct ttn_compile
*c
, unsigned file
, unsigned index
,
498 struct tgsi_ind_register
*indirect
,
499 struct tgsi_dimension
*dim
,
500 struct tgsi_ind_register
*dimind
)
502 nir_builder
*b
= &c
->build
;
505 memset(&src
, 0, sizeof(src
));
508 case TGSI_FILE_TEMPORARY
:
509 if (c
->temp_regs
[index
].var
) {
510 unsigned offset
= c
->temp_regs
[index
].offset
;
511 nir_variable
*var
= c
->temp_regs
[index
].var
;
512 nir_intrinsic_instr
*load
;
514 load
= nir_intrinsic_instr_create(b
->shader
,
515 nir_intrinsic_load_var
);
516 load
->num_components
= 4;
517 load
->variables
[0] = ttn_array_deref(c
, load
, var
, offset
, indirect
);
519 nir_ssa_dest_init(&load
->instr
, &load
->dest
, 4, NULL
);
520 nir_builder_instr_insert(b
, &load
->instr
);
522 src
= nir_src_for_ssa(&load
->dest
.ssa
);
526 src
.reg
.reg
= c
->temp_regs
[index
].reg
;
531 case TGSI_FILE_ADDRESS
:
532 src
.reg
.reg
= c
->addr_reg
;
536 case TGSI_FILE_IMMEDIATE
:
537 src
= nir_src_for_ssa(c
->imm_defs
[index
]);
542 case TGSI_FILE_SYSTEM_VALUE
: {
543 nir_intrinsic_instr
*load
;
550 switch (c
->scan
->system_value_semantic_name
[index
]) {
551 case TGSI_SEMANTIC_VERTEXID_NOBASE
:
552 op
= nir_intrinsic_load_vertex_id_zero_base
;
554 case TGSI_SEMANTIC_VERTEXID
:
555 op
= nir_intrinsic_load_vertex_id
;
557 case TGSI_SEMANTIC_BASEVERTEX
:
558 op
= nir_intrinsic_load_base_vertex
;
560 case TGSI_SEMANTIC_INSTANCEID
:
561 op
= nir_intrinsic_load_instance_id
;
564 unreachable("bad system value");
567 load
= nir_intrinsic_instr_create(b
->shader
, op
);
568 load
->num_components
= ncomp
;
570 nir_ssa_dest_init(&load
->instr
, &load
->dest
, ncomp
, NULL
);
571 nir_builder_instr_insert(b
, &load
->instr
);
573 src
= nir_src_for_ssa(&load
->dest
.ssa
);
577 case TGSI_FILE_INPUT
:
578 case TGSI_FILE_CONSTANT
: {
579 nir_intrinsic_instr
*load
;
584 case TGSI_FILE_INPUT
:
585 op
= nir_intrinsic_load_input
;
588 case TGSI_FILE_CONSTANT
:
590 op
= nir_intrinsic_load_ubo
;
592 op
= nir_intrinsic_load_uniform
;
596 unreachable("No other load files supported");
600 load
= nir_intrinsic_instr_create(b
->shader
, op
);
602 load
->num_components
= 4;
606 ttn_src_for_file_and_index(c
, dimind
->File
, dimind
->Index
,
609 /* UBOs start at index 1 in TGSI: */
611 nir_src_for_ssa(nir_imm_int(b
, dim
->Index
- 1));
618 /* UBO loads don't have a const_index[0] base offset. */
619 offset
= nir_imm_int(b
, index
);
621 offset
= nir_iadd(b
, offset
, ttn_src_for_indirect(c
, indirect
));
623 /* UBO offsets are in bytes, but TGSI gives them to us in vec4's */
624 offset
= nir_ishl(b
, offset
, nir_imm_int(b
, 4));
626 load
->const_index
[0] = index
;
628 offset
= ttn_src_for_indirect(c
, indirect
);
630 offset
= nir_imm_int(b
, 0);
633 load
->src
[srcn
++] = nir_src_for_ssa(offset
);
635 nir_ssa_dest_init(&load
->instr
, &load
->dest
, 4, NULL
);
636 nir_builder_instr_insert(b
, &load
->instr
);
638 src
= nir_src_for_ssa(&load
->dest
.ssa
);
643 unreachable("bad src file");
651 ttn_src_for_indirect(struct ttn_compile
*c
, struct tgsi_ind_register
*indirect
)
653 nir_builder
*b
= &c
->build
;
655 memset(&src
, 0, sizeof(src
));
656 for (int i
= 0; i
< 4; i
++)
657 src
.swizzle
[i
] = indirect
->Swizzle
;
658 src
.src
= ttn_src_for_file_and_index(c
,
662 return nir_imov_alu(b
, src
, 1);
666 ttn_get_dest(struct ttn_compile
*c
, struct tgsi_full_dst_register
*tgsi_fdst
)
668 struct tgsi_dst_register
*tgsi_dst
= &tgsi_fdst
->Register
;
670 unsigned index
= tgsi_dst
->Index
;
672 memset(&dest
, 0, sizeof(dest
));
674 if (tgsi_dst
->File
== TGSI_FILE_TEMPORARY
) {
675 if (c
->temp_regs
[index
].var
) {
676 nir_builder
*b
= &c
->build
;
677 nir_intrinsic_instr
*load
;
678 struct tgsi_ind_register
*indirect
=
679 tgsi_dst
->Indirect
? &tgsi_fdst
->Indirect
: NULL
;
682 /* this works, because TGSI will give us a base offset
683 * (in case of indirect index) that points back into
684 * the array. Access can be direct or indirect, we
685 * don't really care. Just create a one-shot dst reg
686 * that will get store_var'd back into the array var
687 * at the end of ttn_emit_instruction()
689 reg
= nir_local_reg_create(c
->build
.impl
);
690 reg
->num_components
= 4;
691 dest
.dest
.reg
.reg
= reg
;
692 dest
.dest
.reg
.base_offset
= 0;
694 /* since the alu op might not write to all components
695 * of the temporary, we must first do a load_var to
696 * get the previous array elements into the register.
697 * This is one area that NIR could use a bit of
698 * improvement (or opt pass to clean up the mess
699 * once things are scalarized)
702 load
= nir_intrinsic_instr_create(c
->build
.shader
,
703 nir_intrinsic_load_var
);
704 load
->num_components
= 4;
706 ttn_array_deref(c
, load
, c
->temp_regs
[index
].var
,
707 c
->temp_regs
[index
].offset
,
710 load
->dest
= nir_dest_for_reg(reg
);
712 nir_builder_instr_insert(b
, &load
->instr
);
714 assert(!tgsi_dst
->Indirect
);
715 dest
.dest
.reg
.reg
= c
->temp_regs
[index
].reg
;
716 dest
.dest
.reg
.base_offset
= c
->temp_regs
[index
].offset
;
718 } else if (tgsi_dst
->File
== TGSI_FILE_OUTPUT
) {
719 dest
.dest
.reg
.reg
= c
->output_regs
[index
].reg
;
720 dest
.dest
.reg
.base_offset
= c
->output_regs
[index
].offset
;
721 } else if (tgsi_dst
->File
== TGSI_FILE_ADDRESS
) {
723 dest
.dest
.reg
.reg
= c
->addr_reg
;
726 dest
.write_mask
= tgsi_dst
->WriteMask
;
727 dest
.saturate
= false;
729 if (tgsi_dst
->Indirect
&& (tgsi_dst
->File
!= TGSI_FILE_TEMPORARY
)) {
730 nir_src
*indirect
= ralloc(c
->build
.shader
, nir_src
);
731 *indirect
= nir_src_for_ssa(ttn_src_for_indirect(c
, &tgsi_fdst
->Indirect
));
732 dest
.dest
.reg
.indirect
= indirect
;
738 static nir_variable
*
739 ttn_get_var(struct ttn_compile
*c
, struct tgsi_full_dst_register
*tgsi_fdst
)
741 struct tgsi_dst_register
*tgsi_dst
= &tgsi_fdst
->Register
;
742 unsigned index
= tgsi_dst
->Index
;
744 if (tgsi_dst
->File
== TGSI_FILE_TEMPORARY
) {
745 /* we should not have an indirect when there is no var! */
746 if (!c
->temp_regs
[index
].var
)
747 assert(!tgsi_dst
->Indirect
);
748 return c
->temp_regs
[index
].var
;
755 ttn_get_src(struct ttn_compile
*c
, struct tgsi_full_src_register
*tgsi_fsrc
)
757 nir_builder
*b
= &c
->build
;
758 struct tgsi_src_register
*tgsi_src
= &tgsi_fsrc
->Register
;
759 unsigned tgsi_opcode
= c
->token
->FullInstruction
.Instruction
.Opcode
;
760 unsigned tgsi_src_type
= tgsi_opcode_infer_src_type(tgsi_opcode
);
761 bool src_is_float
= !(tgsi_src_type
== TGSI_TYPE_SIGNED
||
762 tgsi_src_type
== TGSI_TYPE_UNSIGNED
);
765 memset(&src
, 0, sizeof(src
));
767 if (tgsi_src
->File
== TGSI_FILE_NULL
) {
768 return nir_imm_float(b
, 0.0);
769 } else if (tgsi_src
->File
== TGSI_FILE_SAMPLER
) {
770 /* Only the index of the sampler gets used in texturing, and it will
771 * handle looking that up on its own instead of using the nir_alu_src.
773 assert(!tgsi_src
->Indirect
);
776 struct tgsi_ind_register
*ind
= NULL
;
777 struct tgsi_dimension
*dim
= NULL
;
778 struct tgsi_ind_register
*dimind
= NULL
;
779 if (tgsi_src
->Indirect
)
780 ind
= &tgsi_fsrc
->Indirect
;
781 if (tgsi_src
->Dimension
) {
782 dim
= &tgsi_fsrc
->Dimension
;
784 dimind
= &tgsi_fsrc
->DimIndirect
;
786 src
.src
= ttn_src_for_file_and_index(c
,
792 src
.swizzle
[0] = tgsi_src
->SwizzleX
;
793 src
.swizzle
[1] = tgsi_src
->SwizzleY
;
794 src
.swizzle
[2] = tgsi_src
->SwizzleZ
;
795 src
.swizzle
[3] = tgsi_src
->SwizzleW
;
797 nir_ssa_def
*def
= nir_fmov_alu(b
, src
, 4);
799 if (tgsi_src
->Absolute
) {
801 def
= nir_fabs(b
, def
);
803 def
= nir_iabs(b
, def
);
806 if (tgsi_src
->Negate
) {
808 def
= nir_fneg(b
, def
);
810 def
= nir_ineg(b
, def
);
817 ttn_alu(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
819 unsigned num_srcs
= nir_op_infos
[op
].num_inputs
;
820 nir_alu_instr
*instr
= nir_alu_instr_create(b
->shader
, op
);
823 for (i
= 0; i
< num_srcs
; i
++)
824 instr
->src
[i
].src
= nir_src_for_ssa(src
[i
]);
827 nir_builder_instr_insert(b
, &instr
->instr
);
831 ttn_move_dest_masked(nir_builder
*b
, nir_alu_dest dest
,
832 nir_ssa_def
*def
, unsigned write_mask
)
834 if (!(dest
.write_mask
& write_mask
))
837 nir_alu_instr
*mov
= nir_alu_instr_create(b
->shader
, nir_op_imov
);
839 mov
->dest
.write_mask
&= write_mask
;
840 mov
->src
[0].src
= nir_src_for_ssa(def
);
841 for (unsigned i
= def
->num_components
; i
< 4; i
++)
842 mov
->src
[0].swizzle
[i
] = def
->num_components
- 1;
843 nir_builder_instr_insert(b
, &mov
->instr
);
847 ttn_move_dest(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
*def
)
849 ttn_move_dest_masked(b
, dest
, def
, TGSI_WRITEMASK_XYZW
);
853 ttn_arl(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
855 ttn_move_dest(b
, dest
, nir_f2i(b
, nir_ffloor(b
, src
[0])));
858 /* EXP - Approximate Exponential Base 2
859 * dst.x = 2^{\lfloor src.x\rfloor}
860 * dst.y = src.x - \lfloor src.x\rfloor
865 ttn_exp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
867 nir_ssa_def
*srcx
= ttn_channel(b
, src
[0], X
);
869 ttn_move_dest_masked(b
, dest
, nir_fexp2(b
, nir_ffloor(b
, srcx
)),
871 ttn_move_dest_masked(b
, dest
, nir_fsub(b
, srcx
, nir_ffloor(b
, srcx
)),
873 ttn_move_dest_masked(b
, dest
, nir_fexp2(b
, srcx
), TGSI_WRITEMASK_Z
);
874 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
877 /* LOG - Approximate Logarithm Base 2
878 * dst.x = \lfloor\log_2{|src.x|}\rfloor
879 * dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
880 * dst.z = \log_2{|src.x|}
884 ttn_log(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
886 nir_ssa_def
*abs_srcx
= nir_fabs(b
, ttn_channel(b
, src
[0], X
));
887 nir_ssa_def
*log2
= nir_flog2(b
, abs_srcx
);
889 ttn_move_dest_masked(b
, dest
, nir_ffloor(b
, log2
), TGSI_WRITEMASK_X
);
890 ttn_move_dest_masked(b
, dest
,
891 nir_fdiv(b
, abs_srcx
, nir_fexp2(b
, nir_ffloor(b
, log2
))),
893 ttn_move_dest_masked(b
, dest
, nir_flog2(b
, abs_srcx
), TGSI_WRITEMASK_Z
);
894 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
897 /* DST - Distance Vector
899 * dst.y = src0.y \times src1.y
904 ttn_dst(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
906 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_X
);
907 ttn_move_dest_masked(b
, dest
, nir_fmul(b
, src
[0], src
[1]), TGSI_WRITEMASK_Y
);
908 ttn_move_dest_masked(b
, dest
, nir_fmov(b
, src
[0]), TGSI_WRITEMASK_Z
);
909 ttn_move_dest_masked(b
, dest
, nir_fmov(b
, src
[1]), TGSI_WRITEMASK_W
);
912 /* LIT - Light Coefficients
914 * dst.y = max(src.x, 0.0)
915 * dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0))} : 0
919 ttn_lit(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
921 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_XW
);
923 ttn_move_dest_masked(b
, dest
, nir_fmax(b
, ttn_channel(b
, src
[0], X
),
924 nir_imm_float(b
, 0.0)), TGSI_WRITEMASK_Y
);
926 if (dest
.write_mask
& TGSI_WRITEMASK_Z
) {
927 nir_ssa_def
*src0_y
= ttn_channel(b
, src
[0], Y
);
928 nir_ssa_def
*wclamp
= nir_fmax(b
, nir_fmin(b
, ttn_channel(b
, src
[0], W
),
929 nir_imm_float(b
, 128.0)),
930 nir_imm_float(b
, -128.0));
931 nir_ssa_def
*pow
= nir_fpow(b
, nir_fmax(b
, src0_y
, nir_imm_float(b
, 0.0)),
934 ttn_move_dest_masked(b
, dest
,
937 nir_imm_float(b
, 0.0),
938 ttn_channel(b
, src
[0], X
)),
939 nir_imm_float(b
, 0.0),
946 * dst.x = \cos{src.x}
947 * dst.y = \sin{src.x}
952 ttn_scs(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
954 ttn_move_dest_masked(b
, dest
, nir_fcos(b
, ttn_channel(b
, src
[0], X
)),
956 ttn_move_dest_masked(b
, dest
, nir_fsin(b
, ttn_channel(b
, src
[0], X
)),
958 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 0.0), TGSI_WRITEMASK_Z
);
959 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
963 ttn_sle(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
965 ttn_move_dest(b
, dest
, nir_sge(b
, src
[1], src
[0]));
969 ttn_sgt(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
971 ttn_move_dest(b
, dest
, nir_slt(b
, src
[1], src
[0]));
975 ttn_clamp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
977 ttn_move_dest(b
, dest
, nir_fmin(b
, nir_fmax(b
, src
[0], src
[1]), src
[2]));
981 ttn_xpd(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
983 ttn_move_dest_masked(b
, dest
,
986 ttn_swizzle(b
, src
[0], Y
, Z
, X
, X
),
987 ttn_swizzle(b
, src
[1], Z
, X
, Y
, X
)),
989 ttn_swizzle(b
, src
[1], Y
, Z
, X
, X
),
990 ttn_swizzle(b
, src
[0], Z
, X
, Y
, X
))),
992 ttn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), TGSI_WRITEMASK_W
);
996 ttn_dp2a(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
998 ttn_move_dest(b
, dest
,
999 ttn_channel(b
, nir_fadd(b
, nir_fdot2(b
, src
[0], src
[1]),
1005 ttn_dp2(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1007 ttn_move_dest(b
, dest
, nir_fdot2(b
, src
[0], src
[1]));
1011 ttn_dp3(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1013 ttn_move_dest(b
, dest
, nir_fdot3(b
, src
[0], src
[1]));
1017 ttn_dp4(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1019 ttn_move_dest(b
, dest
, nir_fdot4(b
, src
[0], src
[1]));
1023 ttn_dph(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1025 ttn_move_dest(b
, dest
, nir_fadd(b
, nir_fdot3(b
, src
[0], src
[1]),
1026 ttn_channel(b
, src
[1], W
)));
1030 ttn_umad(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1032 ttn_move_dest(b
, dest
, nir_iadd(b
, nir_imul(b
, src
[0], src
[1]), src
[2]));
1036 ttn_arr(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1038 ttn_move_dest(b
, dest
, nir_ffloor(b
, nir_fadd(b
, src
[0], nir_imm_float(b
, 0.5))));
1042 ttn_cmp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1044 ttn_move_dest(b
, dest
, nir_bcsel(b
,
1045 nir_flt(b
, src
[0], nir_imm_float(b
, 0.0)),
1050 ttn_ucmp(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1052 ttn_move_dest(b
, dest
, nir_bcsel(b
,
1053 nir_ine(b
, src
[0], nir_imm_int(b
, 0)),
1058 ttn_kill(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1060 nir_intrinsic_instr
*discard
=
1061 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_discard
);
1062 nir_builder_instr_insert(b
, &discard
->instr
);
1066 ttn_kill_if(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
1068 nir_ssa_def
*cmp
= nir_bany_inequal4(b
, nir_flt(b
, src
[0],
1069 nir_imm_float(b
, 0.0)),
1071 nir_intrinsic_instr
*discard
=
1072 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_discard_if
);
1073 discard
->src
[0] = nir_src_for_ssa(cmp
);
1074 nir_builder_instr_insert(b
, &discard
->instr
);
1078 ttn_if(struct ttn_compile
*c
, nir_ssa_def
*src
, bool is_uint
)
1080 nir_builder
*b
= &c
->build
;
1082 src
= ttn_channel(b
, src
, X
);
1084 nir_if
*if_stmt
= nir_if_create(b
->shader
);
1086 if_stmt
->condition
= nir_src_for_ssa(nir_ine(b
, src
, nir_imm_int(b
, 0)));
1088 if_stmt
->condition
= nir_src_for_ssa(nir_fne(b
, src
, nir_imm_int(b
, 0)));
1090 nir_builder_cf_insert(b
, &if_stmt
->cf_node
);
1092 c
->if_stack
[c
->if_stack_pos
] = nir_after_cf_node(&if_stmt
->cf_node
);
1095 b
->cursor
= nir_after_cf_list(&if_stmt
->then_list
);
1097 c
->if_stack
[c
->if_stack_pos
] = nir_after_cf_list(&if_stmt
->else_list
);
1102 ttn_else(struct ttn_compile
*c
)
1104 nir_builder
*b
= &c
->build
;
1106 b
->cursor
= c
->if_stack
[c
->if_stack_pos
- 1];
1110 ttn_endif(struct ttn_compile
*c
)
1112 nir_builder
*b
= &c
->build
;
1114 c
->if_stack_pos
-= 2;
1115 b
->cursor
= c
->if_stack
[c
->if_stack_pos
];
1119 ttn_bgnloop(struct ttn_compile
*c
)
1121 nir_builder
*b
= &c
->build
;
1123 nir_loop
*loop
= nir_loop_create(b
->shader
);
1124 nir_builder_cf_insert(b
, &loop
->cf_node
);
1126 c
->loop_stack
[c
->loop_stack_pos
] = nir_after_cf_node(&loop
->cf_node
);
1127 c
->loop_stack_pos
++;
1129 b
->cursor
= nir_after_cf_list(&loop
->body
);
1133 ttn_cont(nir_builder
*b
)
1135 nir_jump_instr
*instr
= nir_jump_instr_create(b
->shader
, nir_jump_continue
);
1136 nir_builder_instr_insert(b
, &instr
->instr
);
1140 ttn_brk(nir_builder
*b
)
1142 nir_jump_instr
*instr
= nir_jump_instr_create(b
->shader
, nir_jump_break
);
1143 nir_builder_instr_insert(b
, &instr
->instr
);
1147 ttn_endloop(struct ttn_compile
*c
)
1149 nir_builder
*b
= &c
->build
;
1151 c
->loop_stack_pos
--;
1152 b
->cursor
= c
->loop_stack
[c
->loop_stack_pos
];
1156 setup_texture_info(nir_tex_instr
*instr
, unsigned texture
)
1159 case TGSI_TEXTURE_BUFFER
:
1160 instr
->sampler_dim
= GLSL_SAMPLER_DIM_BUF
;
1162 case TGSI_TEXTURE_1D
:
1163 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
1165 case TGSI_TEXTURE_1D_ARRAY
:
1166 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
1167 instr
->is_array
= true;
1169 case TGSI_TEXTURE_SHADOW1D
:
1170 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
1171 instr
->is_shadow
= true;
1173 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1174 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
1175 instr
->is_shadow
= true;
1176 instr
->is_array
= true;
1178 case TGSI_TEXTURE_2D
:
1179 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
1181 case TGSI_TEXTURE_2D_ARRAY
:
1182 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
1183 instr
->is_array
= true;
1185 case TGSI_TEXTURE_2D_MSAA
:
1186 instr
->sampler_dim
= GLSL_SAMPLER_DIM_MS
;
1188 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
1189 instr
->sampler_dim
= GLSL_SAMPLER_DIM_MS
;
1190 instr
->is_array
= true;
1192 case TGSI_TEXTURE_SHADOW2D
:
1193 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
1194 instr
->is_shadow
= true;
1196 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1197 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
1198 instr
->is_shadow
= true;
1199 instr
->is_array
= true;
1201 case TGSI_TEXTURE_3D
:
1202 instr
->sampler_dim
= GLSL_SAMPLER_DIM_3D
;
1204 case TGSI_TEXTURE_CUBE
:
1205 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1207 case TGSI_TEXTURE_CUBE_ARRAY
:
1208 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1209 instr
->is_array
= true;
1211 case TGSI_TEXTURE_SHADOWCUBE
:
1212 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1213 instr
->is_shadow
= true;
1215 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
1216 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
1217 instr
->is_shadow
= true;
1218 instr
->is_array
= true;
1220 case TGSI_TEXTURE_RECT
:
1221 instr
->sampler_dim
= GLSL_SAMPLER_DIM_RECT
;
1223 case TGSI_TEXTURE_SHADOWRECT
:
1224 instr
->sampler_dim
= GLSL_SAMPLER_DIM_RECT
;
1225 instr
->is_shadow
= true;
1228 fprintf(stderr
, "Unknown TGSI texture target %d\n", texture
);
1234 ttn_tex(struct ttn_compile
*c
, nir_alu_dest dest
, nir_ssa_def
**src
)
1236 nir_builder
*b
= &c
->build
;
1237 struct tgsi_full_instruction
*tgsi_inst
= &c
->token
->FullInstruction
;
1238 nir_tex_instr
*instr
;
1240 unsigned num_srcs
, samp
= 1, sview
, i
;
1242 switch (tgsi_inst
->Instruction
.Opcode
) {
1243 case TGSI_OPCODE_TEX
:
1247 case TGSI_OPCODE_TEX2
:
1252 case TGSI_OPCODE_TXP
:
1256 case TGSI_OPCODE_TXB
:
1260 case TGSI_OPCODE_TXB2
:
1265 case TGSI_OPCODE_TXL
:
1269 case TGSI_OPCODE_TXL2
:
1274 case TGSI_OPCODE_TXF
:
1275 if (tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_2D_MSAA
||
1276 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_2D_ARRAY_MSAA
) {
1277 op
= nir_texop_txf_ms
;
1283 case TGSI_OPCODE_TXD
:
1288 case TGSI_OPCODE_LODQ
:
1294 fprintf(stderr
, "unknown TGSI tex op %d\n", tgsi_inst
->Instruction
.Opcode
);
1298 if (tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D
||
1299 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW1D_ARRAY
||
1300 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D
||
1301 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOW2D_ARRAY
||
1302 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWRECT
||
1303 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
1304 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE_ARRAY
) {
1308 num_srcs
+= tgsi_inst
->Texture
.NumOffsets
;
1310 instr
= nir_tex_instr_create(b
->shader
, num_srcs
);
1313 setup_texture_info(instr
, tgsi_inst
->Texture
.Texture
);
1315 switch (instr
->sampler_dim
) {
1316 case GLSL_SAMPLER_DIM_1D
:
1317 case GLSL_SAMPLER_DIM_BUF
:
1318 instr
->coord_components
= 1;
1320 case GLSL_SAMPLER_DIM_2D
:
1321 case GLSL_SAMPLER_DIM_RECT
:
1322 case GLSL_SAMPLER_DIM_EXTERNAL
:
1323 case GLSL_SAMPLER_DIM_MS
:
1324 instr
->coord_components
= 2;
1326 case GLSL_SAMPLER_DIM_3D
:
1327 case GLSL_SAMPLER_DIM_CUBE
:
1328 instr
->coord_components
= 3;
1332 if (instr
->is_array
)
1333 instr
->coord_components
++;
1335 assert(tgsi_inst
->Src
[samp
].Register
.File
== TGSI_FILE_SAMPLER
);
1336 instr
->sampler_index
= tgsi_inst
->Src
[samp
].Register
.Index
;
1338 /* TODO if we supported any opc's which take an explicit SVIEW
1339 * src, we would use that here instead. But for the "legacy"
1340 * texture opc's the SVIEW index is same as SAMP index:
1342 sview
= instr
->sampler_index
;
1344 if (op
== nir_texop_lod
) {
1345 instr
->dest_type
= nir_type_float
;
1346 } else if (sview
< c
->num_samp_types
) {
1347 instr
->dest_type
= c
->samp_types
[sview
];
1349 instr
->dest_type
= nir_type_float
;
1352 unsigned src_number
= 0;
1354 instr
->src
[src_number
].src
=
1355 nir_src_for_ssa(nir_swizzle(b
, src
[0], SWIZ(X
, Y
, Z
, W
),
1356 instr
->coord_components
, false));
1357 instr
->src
[src_number
].src_type
= nir_tex_src_coord
;
1360 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXP
) {
1361 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1362 instr
->src
[src_number
].src_type
= nir_tex_src_projector
;
1366 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB
) {
1367 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1368 instr
->src
[src_number
].src_type
= nir_tex_src_bias
;
1372 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
) {
1373 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[1], X
));
1374 instr
->src
[src_number
].src_type
= nir_tex_src_bias
;
1378 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
) {
1379 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1380 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
1384 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL2
) {
1385 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[1], X
));
1386 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
1390 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXF
) {
1391 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1392 if (op
== nir_texop_txf_ms
)
1393 instr
->src
[src_number
].src_type
= nir_tex_src_ms_index
;
1395 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
1399 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXD
) {
1400 instr
->src
[src_number
].src
=
1401 nir_src_for_ssa(nir_swizzle(b
, src
[1], SWIZ(X
, Y
, Z
, W
),
1402 instr
->coord_components
, false));
1403 instr
->src
[src_number
].src_type
= nir_tex_src_ddx
;
1405 instr
->src
[src_number
].src
=
1406 nir_src_for_ssa(nir_swizzle(b
, src
[2], SWIZ(X
, Y
, Z
, W
),
1407 instr
->coord_components
, false));
1408 instr
->src
[src_number
].src_type
= nir_tex_src_ddy
;
1412 if (instr
->is_shadow
) {
1413 if (instr
->coord_components
== 4)
1414 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[1], X
));
1415 else if (instr
->coord_components
== 3)
1416 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], W
));
1418 instr
->src
[src_number
].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], Z
));
1420 instr
->src
[src_number
].src_type
= nir_tex_src_comparitor
;
1424 for (i
= 0; i
< tgsi_inst
->Texture
.NumOffsets
; i
++) {
1425 struct tgsi_texture_offset
*tex_offset
= &tgsi_inst
->TexOffsets
[i
];
1426 /* since TexOffset ins't using tgsi_full_src_register we get to
1427 * do some extra gymnastics:
1431 memset(&src
, 0, sizeof(src
));
1433 src
.src
= ttn_src_for_file_and_index(c
,
1438 src
.swizzle
[0] = tex_offset
->SwizzleX
;
1439 src
.swizzle
[1] = tex_offset
->SwizzleY
;
1440 src
.swizzle
[2] = tex_offset
->SwizzleZ
;
1441 src
.swizzle
[3] = TGSI_SWIZZLE_W
;
1443 instr
->src
[src_number
].src_type
= nir_tex_src_offset
;
1444 instr
->src
[src_number
].src
= nir_src_for_ssa(
1445 nir_fmov_alu(b
, src
, nir_tex_instr_src_size(instr
, src_number
)));
1449 assert(src_number
== num_srcs
);
1451 nir_ssa_dest_init(&instr
->instr
, &instr
->dest
, 4, NULL
);
1452 nir_builder_instr_insert(b
, &instr
->instr
);
1454 /* Resolve the writemask on the texture op. */
1455 ttn_move_dest(b
, dest
, &instr
->dest
.ssa
);
1458 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1460 * dst.x = texture\_width(unit, lod)
1461 * dst.y = texture\_height(unit, lod)
1462 * dst.z = texture\_depth(unit, lod)
1463 * dst.w = texture\_levels(unit)
1465 * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
1468 ttn_txq(struct ttn_compile
*c
, nir_alu_dest dest
, nir_ssa_def
**src
)
1470 nir_builder
*b
= &c
->build
;
1471 struct tgsi_full_instruction
*tgsi_inst
= &c
->token
->FullInstruction
;
1472 nir_tex_instr
*txs
, *qlv
;
1474 txs
= nir_tex_instr_create(b
->shader
, 1);
1475 txs
->op
= nir_texop_txs
;
1476 setup_texture_info(txs
, tgsi_inst
->Texture
.Texture
);
1478 qlv
= nir_tex_instr_create(b
->shader
, 0);
1479 qlv
->op
= nir_texop_query_levels
;
1480 setup_texture_info(qlv
, tgsi_inst
->Texture
.Texture
);
1482 assert(tgsi_inst
->Src
[1].Register
.File
== TGSI_FILE_SAMPLER
);
1483 txs
->sampler_index
= tgsi_inst
->Src
[1].Register
.Index
;
1484 qlv
->sampler_index
= tgsi_inst
->Src
[1].Register
.Index
;
1486 /* only single src, the lod: */
1487 txs
->src
[0].src
= nir_src_for_ssa(ttn_channel(b
, src
[0], X
));
1488 txs
->src
[0].src_type
= nir_tex_src_lod
;
1490 nir_ssa_dest_init(&txs
->instr
, &txs
->dest
, 3, NULL
);
1491 nir_builder_instr_insert(b
, &txs
->instr
);
1493 nir_ssa_dest_init(&qlv
->instr
, &qlv
->dest
, 1, NULL
);
1494 nir_builder_instr_insert(b
, &qlv
->instr
);
1496 ttn_move_dest_masked(b
, dest
, &txs
->dest
.ssa
, TGSI_WRITEMASK_XYZ
);
1497 ttn_move_dest_masked(b
, dest
, &qlv
->dest
.ssa
, TGSI_WRITEMASK_W
);
1500 static const nir_op op_trans
[TGSI_OPCODE_LAST
] = {
1501 [TGSI_OPCODE_ARL
] = 0,
1502 [TGSI_OPCODE_MOV
] = nir_op_fmov
,
1503 [TGSI_OPCODE_LIT
] = 0,
1504 [TGSI_OPCODE_RCP
] = nir_op_frcp
,
1505 [TGSI_OPCODE_RSQ
] = nir_op_frsq
,
1506 [TGSI_OPCODE_EXP
] = 0,
1507 [TGSI_OPCODE_LOG
] = 0,
1508 [TGSI_OPCODE_MUL
] = nir_op_fmul
,
1509 [TGSI_OPCODE_ADD
] = nir_op_fadd
,
1510 [TGSI_OPCODE_DP3
] = 0,
1511 [TGSI_OPCODE_DP4
] = 0,
1512 [TGSI_OPCODE_DST
] = 0,
1513 [TGSI_OPCODE_MIN
] = nir_op_fmin
,
1514 [TGSI_OPCODE_MAX
] = nir_op_fmax
,
1515 [TGSI_OPCODE_SLT
] = nir_op_slt
,
1516 [TGSI_OPCODE_SGE
] = nir_op_sge
,
1517 [TGSI_OPCODE_MAD
] = nir_op_ffma
,
1518 [TGSI_OPCODE_SUB
] = nir_op_fsub
,
1519 [TGSI_OPCODE_LRP
] = 0,
1520 [TGSI_OPCODE_SQRT
] = nir_op_fsqrt
,
1521 [TGSI_OPCODE_DP2A
] = 0,
1522 [TGSI_OPCODE_FRC
] = nir_op_ffract
,
1523 [TGSI_OPCODE_CLAMP
] = 0,
1524 [TGSI_OPCODE_FLR
] = nir_op_ffloor
,
1525 [TGSI_OPCODE_ROUND
] = nir_op_fround_even
,
1526 [TGSI_OPCODE_EX2
] = nir_op_fexp2
,
1527 [TGSI_OPCODE_LG2
] = nir_op_flog2
,
1528 [TGSI_OPCODE_POW
] = nir_op_fpow
,
1529 [TGSI_OPCODE_XPD
] = 0,
1530 [TGSI_OPCODE_ABS
] = nir_op_fabs
,
1531 [TGSI_OPCODE_DPH
] = 0,
1532 [TGSI_OPCODE_COS
] = nir_op_fcos
,
1533 [TGSI_OPCODE_DDX
] = nir_op_fddx
,
1534 [TGSI_OPCODE_DDY
] = nir_op_fddy
,
1535 [TGSI_OPCODE_KILL
] = 0,
1536 [TGSI_OPCODE_PK2H
] = 0, /* XXX */
1537 [TGSI_OPCODE_PK2US
] = 0, /* XXX */
1538 [TGSI_OPCODE_PK4B
] = 0, /* XXX */
1539 [TGSI_OPCODE_PK4UB
] = 0, /* XXX */
1540 [TGSI_OPCODE_SEQ
] = nir_op_seq
,
1541 [TGSI_OPCODE_SGT
] = 0,
1542 [TGSI_OPCODE_SIN
] = nir_op_fsin
,
1543 [TGSI_OPCODE_SNE
] = nir_op_sne
,
1544 [TGSI_OPCODE_SLE
] = 0,
1545 [TGSI_OPCODE_TEX
] = 0,
1546 [TGSI_OPCODE_TXD
] = 0,
1547 [TGSI_OPCODE_TXP
] = 0,
1548 [TGSI_OPCODE_UP2H
] = 0, /* XXX */
1549 [TGSI_OPCODE_UP2US
] = 0, /* XXX */
1550 [TGSI_OPCODE_UP4B
] = 0, /* XXX */
1551 [TGSI_OPCODE_UP4UB
] = 0, /* XXX */
1552 [TGSI_OPCODE_ARR
] = 0,
1554 /* No function calls, yet. */
1555 [TGSI_OPCODE_CAL
] = 0, /* XXX */
1556 [TGSI_OPCODE_RET
] = 0, /* XXX */
1558 [TGSI_OPCODE_SSG
] = nir_op_fsign
,
1559 [TGSI_OPCODE_CMP
] = 0,
1560 [TGSI_OPCODE_SCS
] = 0,
1561 [TGSI_OPCODE_TXB
] = 0,
1562 [TGSI_OPCODE_DIV
] = nir_op_fdiv
,
1563 [TGSI_OPCODE_DP2
] = 0,
1564 [TGSI_OPCODE_DP2A
] = 0,
1565 [TGSI_OPCODE_TXL
] = 0,
1567 [TGSI_OPCODE_BRK
] = 0,
1568 [TGSI_OPCODE_IF
] = 0,
1569 [TGSI_OPCODE_UIF
] = 0,
1570 [TGSI_OPCODE_ELSE
] = 0,
1571 [TGSI_OPCODE_ENDIF
] = 0,
1573 [TGSI_OPCODE_DDX_FINE
] = nir_op_fddx_fine
,
1574 [TGSI_OPCODE_DDY_FINE
] = nir_op_fddy_fine
,
1576 [TGSI_OPCODE_PUSHA
] = 0, /* XXX */
1577 [TGSI_OPCODE_POPA
] = 0, /* XXX */
1579 [TGSI_OPCODE_CEIL
] = nir_op_fceil
,
1580 [TGSI_OPCODE_I2F
] = nir_op_i2f
,
1581 [TGSI_OPCODE_NOT
] = nir_op_inot
,
1582 [TGSI_OPCODE_TRUNC
] = nir_op_ftrunc
,
1583 [TGSI_OPCODE_SHL
] = nir_op_ishl
,
1584 [TGSI_OPCODE_AND
] = nir_op_iand
,
1585 [TGSI_OPCODE_OR
] = nir_op_ior
,
1586 [TGSI_OPCODE_MOD
] = nir_op_umod
,
1587 [TGSI_OPCODE_XOR
] = nir_op_ixor
,
1588 [TGSI_OPCODE_SAD
] = 0, /* XXX */
1589 [TGSI_OPCODE_TXF
] = 0,
1590 [TGSI_OPCODE_TXQ
] = 0,
1592 [TGSI_OPCODE_CONT
] = 0,
1594 [TGSI_OPCODE_EMIT
] = 0, /* XXX */
1595 [TGSI_OPCODE_ENDPRIM
] = 0, /* XXX */
1597 [TGSI_OPCODE_BGNLOOP
] = 0,
1598 [TGSI_OPCODE_BGNSUB
] = 0, /* XXX: no function calls */
1599 [TGSI_OPCODE_ENDLOOP
] = 0,
1600 [TGSI_OPCODE_ENDSUB
] = 0, /* XXX: no function calls */
1602 [TGSI_OPCODE_TXQ_LZ
] = 0,
1603 [TGSI_OPCODE_NOP
] = 0,
1604 [TGSI_OPCODE_FSEQ
] = nir_op_feq
,
1605 [TGSI_OPCODE_FSGE
] = nir_op_fge
,
1606 [TGSI_OPCODE_FSLT
] = nir_op_flt
,
1607 [TGSI_OPCODE_FSNE
] = nir_op_fne
,
1609 /* No control flow yet */
1610 [TGSI_OPCODE_CALLNZ
] = 0, /* XXX */
1611 [TGSI_OPCODE_BREAKC
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1613 [TGSI_OPCODE_KILL_IF
] = 0,
1615 [TGSI_OPCODE_END
] = 0,
1617 [TGSI_OPCODE_F2I
] = nir_op_f2i
,
1618 [TGSI_OPCODE_IDIV
] = nir_op_idiv
,
1619 [TGSI_OPCODE_IMAX
] = nir_op_imax
,
1620 [TGSI_OPCODE_IMIN
] = nir_op_imin
,
1621 [TGSI_OPCODE_INEG
] = nir_op_ineg
,
1622 [TGSI_OPCODE_ISGE
] = nir_op_ige
,
1623 [TGSI_OPCODE_ISHR
] = nir_op_ishr
,
1624 [TGSI_OPCODE_ISLT
] = nir_op_ilt
,
1625 [TGSI_OPCODE_F2U
] = nir_op_f2u
,
1626 [TGSI_OPCODE_U2F
] = nir_op_u2f
,
1627 [TGSI_OPCODE_UADD
] = nir_op_iadd
,
1628 [TGSI_OPCODE_UDIV
] = nir_op_udiv
,
1629 [TGSI_OPCODE_UMAD
] = 0,
1630 [TGSI_OPCODE_UMAX
] = nir_op_umax
,
1631 [TGSI_OPCODE_UMIN
] = nir_op_umin
,
1632 [TGSI_OPCODE_UMOD
] = nir_op_umod
,
1633 [TGSI_OPCODE_UMUL
] = nir_op_imul
,
1634 [TGSI_OPCODE_USEQ
] = nir_op_ieq
,
1635 [TGSI_OPCODE_USGE
] = nir_op_uge
,
1636 [TGSI_OPCODE_USHR
] = nir_op_ushr
,
1637 [TGSI_OPCODE_USLT
] = nir_op_ult
,
1638 [TGSI_OPCODE_USNE
] = nir_op_ine
,
1640 [TGSI_OPCODE_SWITCH
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1641 [TGSI_OPCODE_CASE
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1642 [TGSI_OPCODE_DEFAULT
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1643 [TGSI_OPCODE_ENDSWITCH
] = 0, /* not emitted by glsl_to_tgsi.cpp */
1645 /* XXX: SAMPLE opcodes */
1647 [TGSI_OPCODE_UARL
] = nir_op_imov
,
1648 [TGSI_OPCODE_UCMP
] = 0,
1649 [TGSI_OPCODE_IABS
] = nir_op_iabs
,
1650 [TGSI_OPCODE_ISSG
] = nir_op_isign
,
1654 [TGSI_OPCODE_TEX2
] = 0,
1655 [TGSI_OPCODE_TXB2
] = 0,
1656 [TGSI_OPCODE_TXL2
] = 0,
1658 [TGSI_OPCODE_IMUL_HI
] = nir_op_imul_high
,
1659 [TGSI_OPCODE_UMUL_HI
] = nir_op_umul_high
,
1661 [TGSI_OPCODE_TG4
] = 0,
1662 [TGSI_OPCODE_LODQ
] = 0,
1664 [TGSI_OPCODE_IBFE
] = nir_op_ibitfield_extract
,
1665 [TGSI_OPCODE_UBFE
] = nir_op_ubitfield_extract
,
1666 [TGSI_OPCODE_BFI
] = nir_op_bitfield_insert
,
1667 [TGSI_OPCODE_BREV
] = nir_op_bitfield_reverse
,
1668 [TGSI_OPCODE_POPC
] = nir_op_bit_count
,
1669 [TGSI_OPCODE_LSB
] = nir_op_find_lsb
,
1670 [TGSI_OPCODE_IMSB
] = nir_op_ifind_msb
,
1671 [TGSI_OPCODE_UMSB
] = nir_op_ufind_msb
,
1673 [TGSI_OPCODE_INTERP_CENTROID
] = 0, /* XXX */
1674 [TGSI_OPCODE_INTERP_SAMPLE
] = 0, /* XXX */
1675 [TGSI_OPCODE_INTERP_OFFSET
] = 0, /* XXX */
1679 ttn_emit_instruction(struct ttn_compile
*c
)
1681 nir_builder
*b
= &c
->build
;
1682 struct tgsi_full_instruction
*tgsi_inst
= &c
->token
->FullInstruction
;
1684 unsigned tgsi_op
= tgsi_inst
->Instruction
.Opcode
;
1685 struct tgsi_full_dst_register
*tgsi_dst
= &tgsi_inst
->Dst
[0];
1687 if (tgsi_op
== TGSI_OPCODE_END
)
1690 nir_ssa_def
*src
[TGSI_FULL_MAX_SRC_REGISTERS
];
1691 for (i
= 0; i
< tgsi_inst
->Instruction
.NumSrcRegs
; i
++) {
1692 src
[i
] = ttn_get_src(c
, &tgsi_inst
->Src
[i
]);
1694 nir_alu_dest dest
= ttn_get_dest(c
, tgsi_dst
);
1697 case TGSI_OPCODE_RSQ
:
1698 ttn_move_dest(b
, dest
, nir_frsq(b
, ttn_channel(b
, src
[0], X
)));
1701 case TGSI_OPCODE_SQRT
:
1702 ttn_move_dest(b
, dest
, nir_fsqrt(b
, ttn_channel(b
, src
[0], X
)));
1705 case TGSI_OPCODE_RCP
:
1706 ttn_move_dest(b
, dest
, nir_frcp(b
, ttn_channel(b
, src
[0], X
)));
1709 case TGSI_OPCODE_EX2
:
1710 ttn_move_dest(b
, dest
, nir_fexp2(b
, ttn_channel(b
, src
[0], X
)));
1713 case TGSI_OPCODE_LG2
:
1714 ttn_move_dest(b
, dest
, nir_flog2(b
, ttn_channel(b
, src
[0], X
)));
1717 case TGSI_OPCODE_POW
:
1718 ttn_move_dest(b
, dest
, nir_fpow(b
,
1719 ttn_channel(b
, src
[0], X
),
1720 ttn_channel(b
, src
[1], X
)));
1723 case TGSI_OPCODE_COS
:
1724 ttn_move_dest(b
, dest
, nir_fcos(b
, ttn_channel(b
, src
[0], X
)));
1727 case TGSI_OPCODE_SIN
:
1728 ttn_move_dest(b
, dest
, nir_fsin(b
, ttn_channel(b
, src
[0], X
)));
1731 case TGSI_OPCODE_ARL
:
1732 ttn_arl(b
, op_trans
[tgsi_op
], dest
, src
);
1735 case TGSI_OPCODE_EXP
:
1736 ttn_exp(b
, op_trans
[tgsi_op
], dest
, src
);
1739 case TGSI_OPCODE_LOG
:
1740 ttn_log(b
, op_trans
[tgsi_op
], dest
, src
);
1743 case TGSI_OPCODE_DST
:
1744 ttn_dst(b
, op_trans
[tgsi_op
], dest
, src
);
1747 case TGSI_OPCODE_LIT
:
1748 ttn_lit(b
, op_trans
[tgsi_op
], dest
, src
);
1751 case TGSI_OPCODE_CLAMP
:
1752 ttn_clamp(b
, op_trans
[tgsi_op
], dest
, src
);
1755 case TGSI_OPCODE_XPD
:
1756 ttn_xpd(b
, op_trans
[tgsi_op
], dest
, src
);
1759 case TGSI_OPCODE_DP2
:
1760 ttn_dp2(b
, op_trans
[tgsi_op
], dest
, src
);
1763 case TGSI_OPCODE_DP3
:
1764 ttn_dp3(b
, op_trans
[tgsi_op
], dest
, src
);
1767 case TGSI_OPCODE_DP4
:
1768 ttn_dp4(b
, op_trans
[tgsi_op
], dest
, src
);
1771 case TGSI_OPCODE_DP2A
:
1772 ttn_dp2a(b
, op_trans
[tgsi_op
], dest
, src
);
1775 case TGSI_OPCODE_DPH
:
1776 ttn_dph(b
, op_trans
[tgsi_op
], dest
, src
);
1779 case TGSI_OPCODE_UMAD
:
1780 ttn_umad(b
, op_trans
[tgsi_op
], dest
, src
);
1783 case TGSI_OPCODE_LRP
:
1784 ttn_move_dest(b
, dest
, nir_flrp(b
, src
[2], src
[1], src
[0]));
1787 case TGSI_OPCODE_KILL
:
1788 ttn_kill(b
, op_trans
[tgsi_op
], dest
, src
);
1791 case TGSI_OPCODE_ARR
:
1792 ttn_arr(b
, op_trans
[tgsi_op
], dest
, src
);
1795 case TGSI_OPCODE_CMP
:
1796 ttn_cmp(b
, op_trans
[tgsi_op
], dest
, src
);
1799 case TGSI_OPCODE_UCMP
:
1800 ttn_ucmp(b
, op_trans
[tgsi_op
], dest
, src
);
1803 case TGSI_OPCODE_SCS
:
1804 ttn_scs(b
, op_trans
[tgsi_op
], dest
, src
);
1807 case TGSI_OPCODE_SGT
:
1808 ttn_sgt(b
, op_trans
[tgsi_op
], dest
, src
);
1811 case TGSI_OPCODE_SLE
:
1812 ttn_sle(b
, op_trans
[tgsi_op
], dest
, src
);
1815 case TGSI_OPCODE_KILL_IF
:
1816 ttn_kill_if(b
, op_trans
[tgsi_op
], dest
, src
);
1819 case TGSI_OPCODE_TEX
:
1820 case TGSI_OPCODE_TXP
:
1821 case TGSI_OPCODE_TXL
:
1822 case TGSI_OPCODE_TXB
:
1823 case TGSI_OPCODE_TXD
:
1824 case TGSI_OPCODE_TEX2
:
1825 case TGSI_OPCODE_TXL2
:
1826 case TGSI_OPCODE_TXB2
:
1827 case TGSI_OPCODE_TXQ_LZ
:
1828 case TGSI_OPCODE_TXF
:
1829 case TGSI_OPCODE_TG4
:
1830 case TGSI_OPCODE_LODQ
:
1831 ttn_tex(c
, dest
, src
);
1834 case TGSI_OPCODE_TXQ
:
1835 ttn_txq(c
, dest
, src
);
1838 case TGSI_OPCODE_NOP
:
1841 case TGSI_OPCODE_IF
:
1842 ttn_if(c
, src
[0], false);
1845 case TGSI_OPCODE_UIF
:
1846 ttn_if(c
, src
[0], true);
1849 case TGSI_OPCODE_ELSE
:
1853 case TGSI_OPCODE_ENDIF
:
1857 case TGSI_OPCODE_BGNLOOP
:
1861 case TGSI_OPCODE_BRK
:
1865 case TGSI_OPCODE_CONT
:
1869 case TGSI_OPCODE_ENDLOOP
:
1874 if (op_trans
[tgsi_op
] != 0 || tgsi_op
== TGSI_OPCODE_MOV
) {
1875 ttn_alu(b
, op_trans
[tgsi_op
], dest
, src
);
1877 fprintf(stderr
, "unknown TGSI opcode: %s\n",
1878 tgsi_get_opcode_name(tgsi_op
));
1884 if (tgsi_inst
->Instruction
.Saturate
) {
1885 assert(!dest
.dest
.is_ssa
);
1886 ttn_move_dest(b
, dest
, nir_fsat(b
, ttn_src_for_dest(b
, &dest
)));
1889 /* if the dst has a matching var, append store_global to move
1890 * output from reg to var
1892 nir_variable
*var
= ttn_get_var(c
, tgsi_dst
);
1894 unsigned index
= tgsi_dst
->Register
.Index
;
1895 unsigned offset
= c
->temp_regs
[index
].offset
;
1896 nir_intrinsic_instr
*store
=
1897 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_store_var
);
1898 struct tgsi_ind_register
*indirect
= tgsi_dst
->Register
.Indirect
?
1899 &tgsi_dst
->Indirect
: NULL
;
1901 store
->num_components
= 4;
1902 store
->const_index
[0] = 0xf;
1903 store
->variables
[0] = ttn_array_deref(c
, store
, var
, offset
, indirect
);
1904 store
->src
[0] = nir_src_for_reg(dest
.dest
.reg
.reg
);
1906 nir_builder_instr_insert(b
, &store
->instr
);
1911 * Puts a NIR intrinsic to store of each TGSI_FILE_OUTPUT value to the output
1912 * variables at the end of the shader.
1914 * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
1915 * written, because there's no output load intrinsic, which means we couldn't
1916 * handle writemasks.
1919 ttn_add_output_stores(struct ttn_compile
*c
)
1921 nir_builder
*b
= &c
->build
;
1923 foreach_list_typed(nir_variable
, var
, node
, &b
->shader
->outputs
) {
1924 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1927 for (i
= 0; i
< array_len
; i
++) {
1928 nir_intrinsic_instr
*store
=
1929 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_store_output
);
1930 unsigned loc
= var
->data
.driver_location
+ i
;
1931 store
->num_components
= 4;
1932 store
->src
[0].reg
.reg
= c
->output_regs
[loc
].reg
;
1933 store
->src
[0].reg
.base_offset
= c
->output_regs
[loc
].offset
;
1934 store
->const_index
[0] = loc
;
1935 store
->src
[1] = nir_src_for_ssa(nir_imm_int(b
, 0));
1936 nir_builder_instr_insert(b
, &store
->instr
);
1941 static gl_shader_stage
1942 tgsi_processor_to_shader_stage(unsigned processor
)
1944 switch (processor
) {
1945 case TGSI_PROCESSOR_FRAGMENT
: return MESA_SHADER_FRAGMENT
;
1946 case TGSI_PROCESSOR_VERTEX
: return MESA_SHADER_VERTEX
;
1947 case TGSI_PROCESSOR_GEOMETRY
: return MESA_SHADER_GEOMETRY
;
1948 case TGSI_PROCESSOR_TESS_CTRL
: return MESA_SHADER_TESS_CTRL
;
1949 case TGSI_PROCESSOR_TESS_EVAL
: return MESA_SHADER_TESS_EVAL
;
1950 case TGSI_PROCESSOR_COMPUTE
: return MESA_SHADER_COMPUTE
;
1952 unreachable("invalid TGSI processor");
1957 tgsi_to_nir(const void *tgsi_tokens
,
1958 const nir_shader_compiler_options
*options
)
1960 struct tgsi_parse_context parser
;
1961 struct tgsi_shader_info scan
;
1962 struct ttn_compile
*c
;
1963 struct nir_shader
*s
;
1966 c
= rzalloc(NULL
, struct ttn_compile
);
1968 tgsi_scan_shader(tgsi_tokens
, &scan
);
1971 nir_builder_init_simple_shader(&c
->build
, NULL
,
1972 tgsi_processor_to_shader_stage(scan
.processor
),
1974 s
= c
->build
.shader
;
1976 s
->num_inputs
= scan
.file_max
[TGSI_FILE_INPUT
] + 1;
1977 s
->num_uniforms
= scan
.const_file_max
[0] + 1;
1978 s
->num_outputs
= scan
.file_max
[TGSI_FILE_OUTPUT
] + 1;
1980 c
->output_regs
= rzalloc_array(c
, struct ttn_reg_info
,
1981 scan
.file_max
[TGSI_FILE_OUTPUT
] + 1);
1982 c
->temp_regs
= rzalloc_array(c
, struct ttn_reg_info
,
1983 scan
.file_max
[TGSI_FILE_TEMPORARY
] + 1);
1984 c
->imm_defs
= rzalloc_array(c
, nir_ssa_def
*,
1985 scan
.file_max
[TGSI_FILE_IMMEDIATE
] + 1);
1987 c
->num_samp_types
= scan
.file_max
[TGSI_FILE_SAMPLER_VIEW
] + 1;
1988 c
->samp_types
= rzalloc_array(c
, nir_alu_type
, c
->num_samp_types
);
1990 c
->if_stack
= rzalloc_array(c
, nir_cursor
,
1991 (scan
.opcode_count
[TGSI_OPCODE_IF
] +
1992 scan
.opcode_count
[TGSI_OPCODE_UIF
]) * 2);
1993 c
->loop_stack
= rzalloc_array(c
, nir_cursor
,
1994 scan
.opcode_count
[TGSI_OPCODE_BGNLOOP
]);
1996 ret
= tgsi_parse_init(&parser
, tgsi_tokens
);
1997 assert(ret
== TGSI_PARSE_OK
);
1999 while (!tgsi_parse_end_of_tokens(&parser
)) {
2000 tgsi_parse_token(&parser
);
2001 c
->token
= &parser
.FullToken
;
2003 switch (parser
.FullToken
.Token
.Type
) {
2004 case TGSI_TOKEN_TYPE_DECLARATION
:
2005 ttn_emit_declaration(c
);
2008 case TGSI_TOKEN_TYPE_INSTRUCTION
:
2009 ttn_emit_instruction(c
);
2012 case TGSI_TOKEN_TYPE_IMMEDIATE
:
2013 ttn_emit_immediate(c
);
2018 tgsi_parse_free(&parser
);
2020 ttn_add_output_stores(c
);