1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
4 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Rob Clark <robclark@freedesktop.org>
31 #include "pipe/p_state.h"
32 #include "util/u_string.h"
33 #include "util/u_memory.h"
34 #include "util/u_inlines.h"
35 #include "tgsi/tgsi_lowering.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_ureg.h"
38 #include "tgsi/tgsi_info.h"
39 #include "tgsi/tgsi_strings.h"
40 #include "tgsi/tgsi_dump.h"
41 #include "tgsi/tgsi_scan.h"
43 #include "freedreno_util.h"
45 #include "ir3_compiler.h"
46 #include "ir3_shader.h"
48 #include "instr-a3xx.h"
51 struct ir3_compile_context
{
52 const struct tgsi_token
*tokens
;
55 struct ir3_shader_variant
*so
;
58 struct ir3_block
*block
;
59 struct ir3_instruction
*current_instr
;
61 /* we need to defer updates to block->outputs[] until the end
62 * of an instruction (so we don't see new value until *after*
63 * the src registers are processed)
66 struct ir3_instruction
*instr
, **instrp
;
68 unsigned num_output_updates
;
70 /* are we in a sequence of "atomic" instructions?
74 /* For fragment shaders, from the hw perspective the only
75 * actual input is r0.xy position register passed to bary.f.
76 * But TGSI doesn't know that, it still declares things as
77 * IN[] registers. So we do all the input tracking normally
78 * and fix things up after compile_instructions()
80 * NOTE that frag_pos is the hardware position (possibly it
81 * is actually an index or tag or some such.. it is *not*
82 * values that can be directly used for gl_FragCoord..)
84 struct ir3_instruction
*frag_pos
, *frag_face
, *frag_coord
[4];
86 /* For vertex shaders, keep track of the system values sources */
87 struct ir3_instruction
*vertex_id
, *basevertex
, *instance_id
;
89 struct tgsi_parse_context parser
;
92 struct tgsi_shader_info info
;
94 /* hmm, would be nice if tgsi_scan_shader figured this out
99 struct ir3_instruction
*fanin
;
101 uint32_t array_dirty
;
102 /* offset into array[], per file, of first array info */
103 uint8_t array_offsets
[TGSI_FILE_COUNT
];
105 /* for calculating input/output positions/linkages: */
108 /* a4xx (at least patchlevel 0) cannot seem to flat-interpolate
109 * so we need to use ldlv.u32 to load the varying directly:
113 unsigned num_internal_temps
;
114 struct tgsi_src_register internal_temps
[8];
116 /* for looking up which system value is which */
117 unsigned sysval_semantics
[8];
119 /* idx/slot for last compiler generated immediate */
120 unsigned immediate_idx
;
122 /* stack of branch instructions that mark (potentially nested)
123 * branch if/else/loop/etc
126 struct ir3_instruction
*instr
, *cond
;
127 bool inv
; /* true iff in else leg of branch */
129 unsigned int branch_count
;
131 /* list of kill instructions: */
132 struct ir3_instruction
*kill
[16];
133 unsigned int kill_count
;
135 /* used when dst is same as one of the src, to avoid overwriting a
136 * src element before the remaining scalar instructions that make
137 * up the vector operation
139 struct tgsi_dst_register tmp_dst
;
140 struct tgsi_src_register
*tmp_src
;
142 /* just for catching incorrect use of get_dst()/put_dst():
148 static void vectorize(struct ir3_compile_context
*ctx
,
149 struct ir3_instruction
*instr
, struct tgsi_dst_register
*dst
,
151 static void create_mov(struct ir3_compile_context
*ctx
,
152 struct tgsi_dst_register
*dst
, struct tgsi_src_register
*src
);
153 static type_t
get_ftype(struct ir3_compile_context
*ctx
);
155 static unsigned setup_arrays(struct ir3_compile_context
*ctx
, unsigned file
, unsigned i
)
157 /* ArrayID 0 for a given file is the legacy array spanning the entire file: */
158 ctx
->array
[i
].first
= 0;
159 ctx
->array
[i
].last
= ctx
->info
.file_max
[file
];
160 ctx
->array_offsets
[file
] = i
;
161 i
+= ctx
->info
.array_max
[file
] + 1;
166 compile_init(struct ir3_compile_context
*ctx
, struct ir3_shader_variant
*so
,
167 const struct tgsi_token
*tokens
)
170 struct tgsi_shader_info
*info
= &ctx
->info
;
171 struct tgsi_lowering_config lconfig
= {
172 .color_two_side
= so
->key
.color_two_side
,
190 case SHADER_FRAGMENT
:
192 lconfig
.saturate_s
= so
->key
.fsaturate_s
;
193 lconfig
.saturate_t
= so
->key
.fsaturate_t
;
194 lconfig
.saturate_r
= so
->key
.fsaturate_r
;
195 ctx
->integer_s
= so
->key
.finteger_s
;
198 lconfig
.saturate_s
= so
->key
.vsaturate_s
;
199 lconfig
.saturate_t
= so
->key
.vsaturate_t
;
200 lconfig
.saturate_r
= so
->key
.vsaturate_r
;
201 ctx
->integer_s
= so
->key
.vinteger_s
;
206 /* hack for standalone compiler which does not have
209 } else if (ir3_shader_gpuid(so
->shader
) >= 400) {
210 /* a4xx seems to have *no* sam.p */
211 lconfig
.lower_TXP
= ~0; /* lower all txp */
212 /* need special handling for "flat" */
213 ctx
->flat_bypass
= true;
215 /* a3xx just needs to avoid sam.p for 3d tex */
216 lconfig
.lower_TXP
= (1 << TGSI_TEXTURE_3D
);
217 /* no special handling for "flat" */
218 ctx
->flat_bypass
= false;
221 ctx
->tokens
= tgsi_transform_lowering(&lconfig
, tokens
, &ctx
->info
);
222 ctx
->free_tokens
= !!ctx
->tokens
;
225 ctx
->tokens
= tokens
;
229 ctx
->array_dirty
= 0;
231 ctx
->num_internal_temps
= 0;
232 ctx
->branch_count
= 0;
235 ctx
->current_instr
= NULL
;
236 ctx
->num_output_updates
= 0;
238 ctx
->frag_pos
= NULL
;
239 ctx
->frag_face
= NULL
;
240 ctx
->vertex_id
= NULL
;
241 ctx
->instance_id
= NULL
;
243 ctx
->using_tmp_dst
= false;
245 memset(ctx
->frag_coord
, 0, sizeof(ctx
->frag_coord
));
246 memset(ctx
->array
, 0, sizeof(ctx
->array
));
247 memset(ctx
->array_offsets
, 0, sizeof(ctx
->array_offsets
));
249 #define FM(x) (1 << TGSI_FILE_##x)
250 /* NOTE: if relative addressing is used, we set constlen in
251 * the compiler (to worst-case value) since we don't know in
252 * the assembler what the max addr reg value can be:
254 if (info
->indirect_files
& FM(CONSTANT
))
255 so
->constlen
= ctx
->info
.file_max
[TGSI_FILE_CONSTANT
] + 1;
258 i
+= setup_arrays(ctx
, TGSI_FILE_INPUT
, i
);
259 i
+= setup_arrays(ctx
, TGSI_FILE_TEMPORARY
, i
);
260 i
+= setup_arrays(ctx
, TGSI_FILE_OUTPUT
, i
);
261 /* any others? we don't track arrays for const..*/
263 /* Immediates go after constants: */
264 if (so
->type
== SHADER_VERTEX
) {
265 so
->first_driver_param
= info
->file_max
[TGSI_FILE_CONSTANT
] + 1;
266 so
->first_immediate
= so
->first_driver_param
+ 1;
268 so
->first_immediate
= info
->file_max
[TGSI_FILE_CONSTANT
] + 1;
270 ctx
->immediate_idx
= 4 * (ctx
->info
.file_max
[TGSI_FILE_IMMEDIATE
] + 1);
272 ret
= tgsi_parse_init(&ctx
->parser
, ctx
->tokens
);
273 if (ret
!= TGSI_PARSE_OK
)
276 ctx
->type
= ctx
->parser
.FullHeader
.Processor
.Processor
;
282 compile_error(struct ir3_compile_context
*ctx
, const char *format
, ...)
285 va_start(ap
, format
);
286 _debug_vprintf(format
, ap
);
288 tgsi_dump(ctx
->tokens
, 0);
292 #define compile_assert(ctx, cond) do { \
293 if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
297 compile_free(struct ir3_compile_context
*ctx
)
299 if (ctx
->free_tokens
)
300 free((void *)ctx
->tokens
);
301 tgsi_parse_free(&ctx
->parser
);
304 struct instr_translater
{
305 void (*fxn
)(const struct instr_translater
*t
,
306 struct ir3_compile_context
*ctx
,
307 struct tgsi_full_instruction
*inst
);
310 opc_t hopc
; /* opc to use for half_precision mode, if different */
315 instr_finish(struct ir3_compile_context
*ctx
)
322 for (i
= 0; i
< ctx
->num_output_updates
; i
++)
323 *(ctx
->output_updates
[i
].instrp
) = ctx
->output_updates
[i
].instr
;
325 ctx
->num_output_updates
= 0;
327 while (ctx
->array_dirty
) {
328 unsigned aid
= ffs(ctx
->array_dirty
) - 1;
329 ctx
->array
[aid
].fanin
= NULL
;
330 ctx
->array_dirty
&= ~(1 << aid
);
334 /* For "atomic" groups of instructions, for example the four scalar
335 * instructions to perform a vec4 operation. Basically this just
336 * blocks out handling of output_updates so the next scalar instruction
337 * still sees the result from before the start of the atomic group.
339 * NOTE: when used properly, this could probably replace get/put_dst()
343 instr_atomic_start(struct ir3_compile_context
*ctx
)
349 instr_atomic_end(struct ir3_compile_context
*ctx
)
355 static struct ir3_instruction
*
356 instr_create(struct ir3_compile_context
*ctx
, int category
, opc_t opc
)
359 return (ctx
->current_instr
= ir3_instr_create(ctx
->block
, category
, opc
));
362 static struct ir3_block
*
363 push_block(struct ir3_compile_context
*ctx
)
365 struct ir3_block
*block
;
366 unsigned ntmp
, nin
, nout
;
368 #define SCALAR_REGS(file) (4 * (ctx->info.file_max[TGSI_FILE_ ## file] + 1))
370 /* hmm, give ourselves room to create 8 extra temporaries (vec4):
372 ntmp
= SCALAR_REGS(TEMPORARY
);
375 nout
= SCALAR_REGS(OUTPUT
);
376 nin
= SCALAR_REGS(INPUT
) + SCALAR_REGS(SYSTEM_VALUE
);
378 /* for outermost block, 'inputs' are the actual shader INPUT
379 * register file. Reads from INPUT registers always go back to
380 * top block. For nested blocks, 'inputs' is used to track any
381 * TEMPORARY file register from one of the enclosing blocks that
382 * is ready in this block.
385 /* NOTE: fragment shaders actually have two inputs (r0.xy, the
388 if (ctx
->type
== TGSI_PROCESSOR_FRAGMENT
) {
390 if (ctx
->info
.reads_position
)
392 if (ctx
->info
.uses_frontface
)
395 nout
+= ARRAY_SIZE(ctx
->kill
);
401 block
= ir3_block_create(ctx
->ir
, ntmp
, nin
, nout
);
403 if ((ctx
->type
== TGSI_PROCESSOR_FRAGMENT
) && !ctx
->block
)
404 block
->noutputs
-= ARRAY_SIZE(ctx
->kill
);
406 block
->parent
= ctx
->block
;
413 pop_block(struct ir3_compile_context
*ctx
)
415 ctx
->block
= ctx
->block
->parent
;
416 compile_assert(ctx
, ctx
->block
);
419 static struct ir3_instruction
*
420 create_output(struct ir3_block
*block
, struct ir3_instruction
*instr
,
423 struct ir3_instruction
*out
;
425 out
= ir3_instr_create(block
, -1, OPC_META_OUTPUT
);
426 out
->inout
.block
= block
;
427 ir3_reg_create(out
, n
, 0);
429 ir3_reg_create(out
, 0, IR3_REG_SSA
)->instr
= instr
;
434 static struct ir3_instruction
*
435 create_input(struct ir3_block
*block
, struct ir3_instruction
*instr
,
438 struct ir3_instruction
*in
;
440 in
= ir3_instr_create(block
, -1, OPC_META_INPUT
);
441 in
->inout
.block
= block
;
442 ir3_reg_create(in
, n
, 0);
444 ir3_reg_create(in
, 0, IR3_REG_SSA
)->instr
= instr
;
449 static struct ir3_instruction
*
450 block_input(struct ir3_block
*block
, unsigned n
)
452 /* references to INPUT register file always go back up to
456 return block_input(block
->parent
, n
);
457 return block
->inputs
[n
];
460 /* return temporary in scope, creating if needed meta-input node
461 * to track block inputs
463 static struct ir3_instruction
*
464 block_temporary(struct ir3_block
*block
, unsigned n
)
466 /* references to TEMPORARY register file, find the nearest
467 * enclosing block which has already assigned this temporary,
468 * creating meta-input instructions along the way to keep
469 * track of block inputs
471 if (block
->parent
&& !block
->temporaries
[n
]) {
472 /* if already have input for this block, reuse: */
473 if (!block
->inputs
[n
])
474 block
->inputs
[n
] = block_temporary(block
->parent
, n
);
476 /* and create new input to return: */
477 return create_input(block
, block
->inputs
[n
], n
);
479 return block
->temporaries
[n
];
482 static struct ir3_instruction
*
483 create_immed(struct ir3_compile_context
*ctx
, float val
)
485 /* NOTE: *don't* use instr_create() here!
487 struct ir3_instruction
*instr
;
488 instr
= ir3_instr_create(ctx
->block
, 1, 0);
489 instr
->cat1
.src_type
= get_ftype(ctx
);
490 instr
->cat1
.dst_type
= get_ftype(ctx
);
491 ir3_reg_create(instr
, 0, 0);
492 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->fim_val
= val
;
497 ssa_instr_set(struct ir3_compile_context
*ctx
, unsigned file
, unsigned n
,
498 struct ir3_instruction
*instr
)
500 struct ir3_block
*block
= ctx
->block
;
501 unsigned idx
= ctx
->num_output_updates
;
503 compile_assert(ctx
, idx
< ARRAY_SIZE(ctx
->output_updates
));
505 /* NOTE: defer update of temporaries[idx] or output[idx]
506 * until instr_finish(), so that if the current instruction
507 * reads the same TEMP/OUT[] it gets the old value:
509 * bleh.. this might be a bit easier to just figure out
510 * in instr_finish(). But at that point we've already
511 * lost information about OUTPUT vs TEMPORARY register
516 case TGSI_FILE_OUTPUT
:
517 compile_assert(ctx
, n
< block
->noutputs
);
518 ctx
->output_updates
[idx
].instrp
= &block
->outputs
[n
];
519 ctx
->output_updates
[idx
].instr
= instr
;
520 ctx
->num_output_updates
++;
522 case TGSI_FILE_TEMPORARY
:
523 compile_assert(ctx
, n
< block
->ntemporaries
);
524 ctx
->output_updates
[idx
].instrp
= &block
->temporaries
[n
];
525 ctx
->output_updates
[idx
].instr
= instr
;
526 ctx
->num_output_updates
++;
528 case TGSI_FILE_ADDRESS
:
529 compile_assert(ctx
, n
< 1);
530 ctx
->output_updates
[idx
].instrp
= &block
->address
;
531 ctx
->output_updates
[idx
].instr
= instr
;
532 ctx
->num_output_updates
++;
537 static struct ir3_instruction
*
538 ssa_instr_get(struct ir3_compile_context
*ctx
, unsigned file
, unsigned n
)
540 struct ir3_block
*block
= ctx
->block
;
541 struct ir3_instruction
*instr
= NULL
;
544 case TGSI_FILE_INPUT
:
545 instr
= block_input(ctx
->block
, n
);
547 case TGSI_FILE_OUTPUT
:
548 /* really this should just happen in case of 'MOV_SAT OUT[n], ..',
549 * for the following clamp instructions:
551 instr
= block
->outputs
[n
];
552 /* we don't have to worry about read from an OUTPUT that was
553 * assigned outside of the current block, because the _SAT
554 * clamp instructions will always be in the same block as
555 * the original instruction which wrote the OUTPUT
557 compile_assert(ctx
, instr
);
559 case TGSI_FILE_TEMPORARY
:
560 instr
= block_temporary(ctx
->block
, n
);
562 /* this can happen when registers (or components of a TGSI
563 * register) are used as src before they have been assigned
564 * (undefined contents). To avoid confusing the rest of the
565 * compiler, and to generally keep things peachy, substitute
566 * an instruction that sets the src to 0.0. Or to keep
567 * things undefined, I could plug in a random number? :-P
569 * NOTE: *don't* use instr_create() here!
571 instr
= create_immed(ctx
, 0.0);
572 /* no need to recreate the immed for every access: */
573 block
->temporaries
[n
] = instr
;
576 case TGSI_FILE_SYSTEM_VALUE
:
577 switch (ctx
->sysval_semantics
[n
>> 2]) {
578 case TGSI_SEMANTIC_VERTEXID_NOBASE
:
579 instr
= ctx
->vertex_id
;
581 case TGSI_SEMANTIC_BASEVERTEX
:
582 instr
= ctx
->basevertex
;
584 case TGSI_SEMANTIC_INSTANCEID
:
585 instr
= ctx
->instance_id
;
594 static int dst_array_id(struct ir3_compile_context
*ctx
,
595 const struct tgsi_dst_register
*dst
)
597 // XXX complete hack to recover tgsi_full_dst_register...
598 // nothing that isn't wrapped in a tgsi_full_dst_register
599 // should be indirect
600 const struct tgsi_full_dst_register
*fdst
= (const void *)dst
;
601 return fdst
->Indirect
.ArrayID
+ ctx
->array_offsets
[dst
->File
];
604 static int src_array_id(struct ir3_compile_context
*ctx
,
605 const struct tgsi_src_register
*src
)
607 // XXX complete hack to recover tgsi_full_src_register...
608 // nothing that isn't wrapped in a tgsi_full_src_register
609 // should be indirect
610 const struct tgsi_full_src_register
*fsrc
= (const void *)src
;
611 debug_assert(src
->File
!= TGSI_FILE_CONSTANT
);
612 return fsrc
->Indirect
.ArrayID
+ ctx
->array_offsets
[src
->File
];
615 static struct ir3_instruction
*
616 array_fanin(struct ir3_compile_context
*ctx
, unsigned aid
, unsigned file
)
618 struct ir3_instruction
*instr
;
620 if (ctx
->array
[aid
].fanin
) {
621 instr
= ctx
->array
[aid
].fanin
;
623 unsigned first
= ctx
->array
[aid
].first
;
624 unsigned last
= ctx
->array
[aid
].last
;
627 instr
= ir3_instr_create2(ctx
->block
, -1, OPC_META_FI
,
628 1 + (4 * (last
+ 1 - first
)));
629 ir3_reg_create(instr
, 0, 0);
630 for (i
= first
; i
<= last
; i
++) {
631 for (j
= 0; j
< 4; j
++) {
632 unsigned n
= regid(i
, j
);
633 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
=
634 ssa_instr_get(ctx
, file
, n
);
637 ctx
->array
[aid
].fanin
= instr
;
638 ctx
->array_dirty
|= (1 << aid
);
645 ssa_dst(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
,
646 const struct tgsi_dst_register
*dst
, unsigned chan
)
649 struct ir3_register
*reg
= instr
->regs
[0];
650 unsigned i
, aid
= dst_array_id(ctx
, dst
);
651 unsigned first
= ctx
->array
[aid
].first
;
652 unsigned last
= ctx
->array
[aid
].last
;
653 unsigned off
= dst
->Index
- first
; /* vec4 offset */
655 reg
->size
= 4 * (1 + last
- first
);
656 reg
->offset
= regid(off
, chan
);
658 instr
->fanin
= array_fanin(ctx
, aid
, dst
->File
);
660 /* annotate with the array-id, to help out the register-
661 * assignment stage. At least for the case of indirect
662 * writes, we should capture enough dependencies to
663 * preserve the order of reads/writes of the array, so
664 * the multiple "names" for the array should end up all
665 * assigned to the same registers.
667 instr
->fanin
->fi
.aid
= aid
;
669 /* Since we are scalarizing vec4 tgsi instructions/regs, we
670 * run into a slight complication here. To do the naive thing
671 * and setup a fanout for each scalar array element would end
672 * up with the result that the instructions generated for each
673 * component of the vec4 would end up clobbering each other.
674 * So we take advantage here of knowing that the array index
675 * (after the shl.b) will be a multiple of four, and only set
676 * every fourth scalar component in the array. See also
677 * fixup_ssa_dst_array()
679 for (i
= first
; i
<= last
; i
++) {
680 struct ir3_instruction
*split
;
681 unsigned n
= regid(i
, chan
);
682 int off
= (4 * (i
- first
)) + chan
;
684 if (is_meta(instr
) && (instr
->opc
== OPC_META_FO
))
685 off
-= instr
->fo
.off
;
687 split
= ir3_instr_create(ctx
->block
, -1, OPC_META_FO
);
689 ir3_reg_create(split
, 0, 0);
690 ir3_reg_create(split
, 0, IR3_REG_SSA
)->instr
= instr
;
692 ssa_instr_set(ctx
, dst
->File
, n
, split
);
695 /* normal case (not relative addressed GPR) */
696 ssa_instr_set(ctx
, dst
->File
, regid(dst
->Index
, chan
), instr
);
701 ssa_src(struct ir3_compile_context
*ctx
, struct ir3_register
*reg
,
702 const struct tgsi_src_register
*src
, unsigned chan
)
704 struct ir3_instruction
*instr
;
706 if (src
->Indirect
&& (src
->File
!= TGSI_FILE_CONSTANT
)) {
707 /* for relative addressing of gpr's (due to register assignment)
708 * we must generate a fanin instruction to collect all possible
709 * array elements that the instruction could address together:
711 unsigned aid
= src_array_id(ctx
, src
);
712 unsigned first
= ctx
->array
[aid
].first
;
713 unsigned last
= ctx
->array
[aid
].last
;
714 unsigned off
= src
->Index
- first
; /* vec4 offset */
716 reg
->size
= 4 * (1 + last
- first
);
717 reg
->offset
= regid(off
, chan
);
719 instr
= array_fanin(ctx
, aid
, src
->File
);
721 /* normal case (not relative addressed GPR) */
722 instr
= ssa_instr_get(ctx
, src
->File
, regid(src
->Index
, chan
));
726 reg
->flags
|= IR3_REG_SSA
;
728 } else if (reg
->flags
& IR3_REG_SSA
) {
729 /* special hack for trans_samp() which calls ssa_src() directly
730 * to build up the collect (fanin) for const src.. (so SSA flag
731 * set but no src instr... it basically gets lucky because we
732 * default to 0.0 for "undefined" src instructions, which is
733 * what it wants. We probably need to give it a better way to
734 * do this, but for now this hack:
736 reg
->instr
= create_immed(ctx
, 0.0);
740 static struct ir3_register
*
741 add_dst_reg_wrmask(struct ir3_compile_context
*ctx
,
742 struct ir3_instruction
*instr
, const struct tgsi_dst_register
*dst
,
743 unsigned chan
, unsigned wrmask
)
745 unsigned flags
= 0, num
= 0;
746 struct ir3_register
*reg
;
749 case TGSI_FILE_OUTPUT
:
750 case TGSI_FILE_TEMPORARY
:
753 case TGSI_FILE_ADDRESS
:
754 flags
|= IR3_REG_ADDR
;
758 compile_error(ctx
, "unsupported dst register file: %s\n",
759 tgsi_file_name(dst
->File
));
764 flags
|= IR3_REG_RELATIV
;
766 /* shouldn't happen, and we can't cope with it below: */
767 compile_assert(ctx
, wrmask
== 0x1);
769 compile_assert(ctx
, ctx
->block
->address
);
771 compile_assert(ctx
, ctx
->block
->address
== instr
->address
);
773 instr
->address
= ctx
->block
->address
;
774 array_insert(ctx
->ir
->indirects
, instr
);
777 reg
= ir3_reg_create(instr
, regid(num
, chan
), flags
);
778 reg
->wrmask
= wrmask
;
782 ssa_dst(ctx
, instr
, dst
, chan
);
783 } else if ((dst
->File
== TGSI_FILE_TEMPORARY
) ||
784 (dst
->File
== TGSI_FILE_OUTPUT
) ||
785 (dst
->File
== TGSI_FILE_ADDRESS
)) {
786 struct ir3_instruction
*prev
= NULL
;
789 compile_assert(ctx
, !dst
->Indirect
);
791 /* if instruction writes multiple, we need to create
792 * some place-holder collect the registers:
794 for (i
= 0; i
< 4; i
++) {
795 /* NOTE: slightly ugly that we setup neighbor ptrs
796 * for FO here, but handle FI in CP pass.. we should
797 * probably just always setup neighbor ptrs in the
800 struct ir3_instruction
*split
=
801 ir3_instr_create(ctx
->block
, -1, OPC_META_FO
);
803 /* unused dst reg: */
804 /* NOTE: set SSA flag on dst here, because unused FO's
805 * which don't get scheduled will end up not in the
806 * instruction list when RA sets SSA flag on each dst.
807 * Slight hack. We really should set SSA flag on
808 * every dst register in the frontend.
810 ir3_reg_create(split
, 0, IR3_REG_SSA
);
811 /* and src reg used to hold original instr */
812 ir3_reg_create(split
, 0, IR3_REG_SSA
)->instr
= instr
;
814 split
->cp
.left
= prev
;
815 split
->cp
.left_cnt
++;
816 prev
->cp
.right
= split
;
817 prev
->cp
.right_cnt
++;
819 if ((wrmask
& (1 << i
)) && !ctx
->atomic
)
820 ssa_dst(ctx
, split
, dst
, chan
+i
);
/* Convenience wrapper: single-component dst (write-mask .x only). */
static struct ir3_register *
add_dst_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_dst_register *dst, unsigned chan)
{
	return add_dst_reg_wrmask(ctx, instr, dst, chan, 0x1);
}
835 static struct ir3_register
*
836 add_src_reg_wrmask(struct ir3_compile_context
*ctx
,
837 struct ir3_instruction
*instr
, const struct tgsi_src_register
*src
,
838 unsigned chan
, unsigned wrmask
)
840 unsigned flags
= 0, num
= 0;
841 struct ir3_register
*reg
;
844 case TGSI_FILE_IMMEDIATE
:
845 /* TODO if possible, use actual immediate instead of const.. but
846 * TGSI has vec4 immediates, we can only embed scalar (of limited
847 * size, depending on instruction..)
849 flags
|= IR3_REG_CONST
;
850 num
= src
->Index
+ ctx
->so
->first_immediate
;
852 case TGSI_FILE_CONSTANT
:
853 flags
|= IR3_REG_CONST
;
856 case TGSI_FILE_OUTPUT
:
857 /* NOTE: we should only end up w/ OUTPUT file for things like
858 * clamp()'ing saturated dst instructions
860 case TGSI_FILE_INPUT
:
861 case TGSI_FILE_TEMPORARY
:
862 case TGSI_FILE_SYSTEM_VALUE
:
866 compile_error(ctx
, "unsupported src register file: %s\n",
867 tgsi_file_name(src
->File
));
871 /* We seem to have 8 bits (6.2) for dst register always, so I think
872 * it is safe to assume GPR cannot be >=64
874 * cat3 instructions only have 8 bits for src2, but cannot take a
877 * cat5 and cat6 in some cases only has 8 bits, but cannot take a
880 * Other than that we seem to have 12 bits to encode const src,
881 * except for cat1 which may only have 11 bits (but that seems like
884 if (flags
& IR3_REG_CONST
)
885 compile_assert(ctx
, src
->Index
< (1 << 9));
887 compile_assert(ctx
, src
->Index
< (1 << 6));
890 flags
|= IR3_REG_ABS
;
892 flags
|= IR3_REG_NEGATE
;
895 flags
|= IR3_REG_RELATIV
;
897 /* shouldn't happen, and we can't cope with it below: */
898 compile_assert(ctx
, wrmask
== 0x1);
900 compile_assert(ctx
, ctx
->block
->address
);
902 compile_assert(ctx
, ctx
->block
->address
== instr
->address
);
904 instr
->address
= ctx
->block
->address
;
905 array_insert(ctx
->ir
->indirects
, instr
);
908 reg
= ir3_reg_create(instr
, regid(num
, chan
), flags
);
909 reg
->wrmask
= wrmask
;
913 ssa_src(ctx
, reg
, src
, chan
);
914 } else if ((src
->File
== TGSI_FILE_TEMPORARY
) ||
915 (src
->File
== TGSI_FILE_OUTPUT
) ||
916 (src
->File
== TGSI_FILE_INPUT
)) {
917 struct ir3_instruction
*collect
;
920 compile_assert(ctx
, !src
->Indirect
);
922 /* if instruction reads multiple, we need to create
923 * some place-holder collect the registers:
925 collect
= ir3_instr_create(ctx
->block
, -1, OPC_META_FI
);
926 ir3_reg_create(collect
, 0, 0); /* unused dst reg */
928 for (i
= 0; i
< 4; i
++) {
929 if (wrmask
& (1 << i
)) {
930 /* and src reg used point to the original instr */
931 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
),
933 } else if (wrmask
& ~((i
<< i
) - 1)) {
934 /* if any remaining components, then dummy
935 * placeholder src reg to fill in the blanks:
937 ir3_reg_create(collect
, 0, 0);
941 reg
->flags
|= IR3_REG_SSA
;
942 reg
->instr
= collect
;
/* Convenience wrapper: single-component src read. */
static struct ir3_register *
add_src_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_src_register *src, unsigned chan)
{
	return add_src_reg_wrmask(ctx, instr, src, chan, 0x1);
}
956 src_from_dst(struct tgsi_src_register
*src
, struct tgsi_dst_register
*dst
)
958 src
->File
= dst
->File
;
959 src
->Indirect
= dst
->Indirect
;
960 src
->Dimension
= dst
->Dimension
;
961 src
->Index
= dst
->Index
;
964 src
->SwizzleX
= TGSI_SWIZZLE_X
;
965 src
->SwizzleY
= TGSI_SWIZZLE_Y
;
966 src
->SwizzleZ
= TGSI_SWIZZLE_Z
;
967 src
->SwizzleW
= TGSI_SWIZZLE_W
;
970 /* Get internal-temp src/dst to use for a sequence of instructions
971 * generated by a single TGSI op.
973 static struct tgsi_src_register
*
974 get_internal_temp(struct ir3_compile_context
*ctx
,
975 struct tgsi_dst_register
*tmp_dst
)
977 struct tgsi_src_register
*tmp_src
;
980 tmp_dst
->File
= TGSI_FILE_TEMPORARY
;
981 tmp_dst
->WriteMask
= TGSI_WRITEMASK_XYZW
;
982 tmp_dst
->Indirect
= 0;
983 tmp_dst
->Dimension
= 0;
985 /* assign next temporary: */
986 n
= ctx
->num_internal_temps
++;
987 compile_assert(ctx
, n
< ARRAY_SIZE(ctx
->internal_temps
));
988 tmp_src
= &ctx
->internal_temps
[n
];
990 tmp_dst
->Index
= ctx
->info
.file_max
[TGSI_FILE_TEMPORARY
] + n
+ 1;
992 src_from_dst(tmp_src
, tmp_dst
);
998 is_const(struct tgsi_src_register
*src
)
1000 return (src
->File
== TGSI_FILE_CONSTANT
) ||
1001 (src
->File
== TGSI_FILE_IMMEDIATE
);
1005 is_relative(struct tgsi_src_register
*src
)
1007 return src
->Indirect
;
/* true when the src is either relative addressed or in the const file --
 * the cases get_unconst() must legalize for instructions that can't
 * encode them.
 */
static bool
is_rel_or_const(struct tgsi_src_register *src)
{
	return is_relative(src) || is_const(src);
}
1017 get_ftype(struct ir3_compile_context
*ctx
)
1023 get_utype(struct ir3_compile_context
*ctx
)
1029 get_stype(struct ir3_compile_context
*ctx
)
1035 src_swiz(struct tgsi_src_register
*src
, int chan
)
1038 case 0: return src
->SwizzleX
;
1039 case 1: return src
->SwizzleY
;
1040 case 2: return src
->SwizzleZ
;
1041 case 3: return src
->SwizzleW
;
1047 /* for instructions that cannot take a const register as src, if needed
1048 * generate a move to temporary gpr:
1050 static struct tgsi_src_register
*
1051 get_unconst(struct ir3_compile_context
*ctx
, struct tgsi_src_register
*src
)
1053 struct tgsi_dst_register tmp_dst
;
1054 struct tgsi_src_register
*tmp_src
;
1056 compile_assert(ctx
, is_rel_or_const(src
));
1058 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1060 create_mov(ctx
, &tmp_dst
, src
);
1066 get_immediate(struct ir3_compile_context
*ctx
,
1067 struct tgsi_src_register
*reg
, uint32_t val
)
1069 unsigned neg
, swiz
, idx
, i
;
1070 /* actually maps 1:1 currently.. not sure if that is safe to rely on: */
1071 static const unsigned swiz2tgsi
[] = {
1072 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_W
,
1075 for (i
= 0; i
< ctx
->immediate_idx
; i
++) {
1079 if (ctx
->so
->immediates
[idx
].val
[swiz
] == val
) {
1084 if (ctx
->so
->immediates
[idx
].val
[swiz
] == -val
) {
1090 if (i
== ctx
->immediate_idx
) {
1091 /* need to generate a new immediate: */
1095 ctx
->so
->immediates
[idx
].val
[swiz
] = val
;
1096 ctx
->so
->immediates_count
= idx
+ 1;
1097 ctx
->immediate_idx
++;
1100 reg
->File
= TGSI_FILE_IMMEDIATE
;
1106 reg
->SwizzleX
= swiz2tgsi
[swiz
];
1107 reg
->SwizzleY
= swiz2tgsi
[swiz
];
1108 reg
->SwizzleZ
= swiz2tgsi
[swiz
];
1109 reg
->SwizzleW
= swiz2tgsi
[swiz
];
1113 create_mov(struct ir3_compile_context
*ctx
, struct tgsi_dst_register
*dst
,
1114 struct tgsi_src_register
*src
)
1116 type_t type_mov
= get_ftype(ctx
);
1119 for (i
= 0; i
< 4; i
++) {
1120 /* move to destination: */
1121 if (dst
->WriteMask
& (1 << i
)) {
1122 struct ir3_instruction
*instr
;
1124 if (src
->Absolute
|| src
->Negate
) {
1125 /* can't have abs or neg on a mov instr, so use
1126 * absneg.f instead to handle these cases:
1128 instr
= instr_create(ctx
, 2, OPC_ABSNEG_F
);
1130 instr
= instr_create(ctx
, 1, 0);
1131 instr
->cat1
.src_type
= type_mov
;
1132 instr
->cat1
.dst_type
= type_mov
;
1135 add_dst_reg(ctx
, instr
, dst
, i
);
1136 add_src_reg(ctx
, instr
, src
, src_swiz(src
, i
));
1142 create_clamp(struct ir3_compile_context
*ctx
,
1143 struct tgsi_dst_register
*dst
, struct tgsi_src_register
*val
,
1144 struct tgsi_src_register
*minval
, struct tgsi_src_register
*maxval
)
1146 struct ir3_instruction
*instr
;
1148 instr
= instr_create(ctx
, 2, OPC_MAX_F
);
1149 vectorize(ctx
, instr
, dst
, 2, val
, 0, minval
, 0);
1151 instr
= instr_create(ctx
, 2, OPC_MIN_F
);
1152 vectorize(ctx
, instr
, dst
, 2, val
, 0, maxval
, 0);
1156 create_clamp_imm(struct ir3_compile_context
*ctx
,
1157 struct tgsi_dst_register
*dst
,
1158 uint32_t minval
, uint32_t maxval
)
1160 struct tgsi_src_register minconst
, maxconst
;
1161 struct tgsi_src_register src
;
1163 src_from_dst(&src
, dst
);
1165 get_immediate(ctx
, &minconst
, minval
);
1166 get_immediate(ctx
, &maxconst
, maxval
);
1168 create_clamp(ctx
, dst
, &src
, &minconst
, &maxconst
);
1171 static struct tgsi_dst_register
*
1172 get_dst(struct ir3_compile_context
*ctx
, struct tgsi_full_instruction
*inst
)
1174 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
1177 compile_assert(ctx
, !ctx
->using_tmp_dst
);
1178 ctx
->using_tmp_dst
= true;
1180 for (i
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
1181 struct tgsi_src_register
*src
= &inst
->Src
[i
].Register
;
1182 if ((src
->File
== dst
->File
) && (src
->Index
== dst
->Index
)) {
1183 if ((dst
->WriteMask
== TGSI_WRITEMASK_XYZW
) &&
1184 (src
->SwizzleX
== TGSI_SWIZZLE_X
) &&
1185 (src
->SwizzleY
== TGSI_SWIZZLE_Y
) &&
1186 (src
->SwizzleZ
== TGSI_SWIZZLE_Z
) &&
1187 (src
->SwizzleW
== TGSI_SWIZZLE_W
))
1189 ctx
->tmp_src
= get_internal_temp(ctx
, &ctx
->tmp_dst
);
1190 ctx
->tmp_dst
.WriteMask
= dst
->WriteMask
;
1191 dst
= &ctx
->tmp_dst
;
1199 put_dst(struct ir3_compile_context
*ctx
, struct tgsi_full_instruction
*inst
,
1200 struct tgsi_dst_register
*dst
)
1202 compile_assert(ctx
, ctx
->using_tmp_dst
);
1203 ctx
->using_tmp_dst
= false;
1205 /* if necessary, add mov back into original dst: */
1206 if (dst
!= &inst
->Dst
[0].Register
) {
1207 create_mov(ctx
, &inst
->Dst
[0].Register
, ctx
->tmp_src
);
1211 /* helper to generate the necessary repeat and/or additional instructions
1212 * to turn a scalar instruction into a vector operation:
1215 vectorize(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
,
1216 struct tgsi_dst_register
*dst
, int nsrcs
, ...)
1221 instr_atomic_start(ctx
);
1223 for (i
= 0; i
< 4; i
++) {
1224 if (dst
->WriteMask
& (1 << i
)) {
1225 struct ir3_instruction
*cur
;
1230 cur
= instr_create(ctx
, instr
->category
, instr
->opc
);
1231 memcpy(cur
->info
, instr
->info
, sizeof(cur
->info
));
1234 add_dst_reg(ctx
, cur
, dst
, i
);
1236 va_start(ap
, nsrcs
);
1237 for (j
= 0; j
< nsrcs
; j
++) {
1238 struct tgsi_src_register
*src
=
1239 va_arg(ap
, struct tgsi_src_register
*);
1240 unsigned flags
= va_arg(ap
, unsigned);
1241 struct ir3_register
*reg
;
1242 if (flags
& IR3_REG_IMMED
) {
1243 reg
= ir3_reg_create(cur
, 0, IR3_REG_IMMED
);
1244 /* this is an ugly cast.. should have put flags first! */
1245 reg
->iim_val
= *(int *)&src
;
1247 reg
= add_src_reg(ctx
, cur
, src
, src_swiz(src
, i
));
1249 reg
->flags
|= flags
& ~IR3_REG_NEGATE
;
1250 if (flags
& IR3_REG_NEGATE
)
1251 reg
->flags
^= IR3_REG_NEGATE
;
1257 instr_atomic_end(ctx
);
1261 * Handlers for TGSI instructions which do not have a 1:1 mapping to
1262 * native instructions:
1266 trans_clamp(const struct instr_translater
*t
,
1267 struct ir3_compile_context
*ctx
,
1268 struct tgsi_full_instruction
*inst
)
1270 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1271 struct tgsi_src_register
*src0
= &inst
->Src
[0].Register
;
1272 struct tgsi_src_register
*src1
= &inst
->Src
[1].Register
;
1273 struct tgsi_src_register
*src2
= &inst
->Src
[2].Register
;
1275 create_clamp(ctx
, dst
, src0
, src1
, src2
);
1277 put_dst(ctx
, inst
, dst
);
1280 /* ARL(x) = x, but mova from hrN.x to a0.. */
1282 trans_arl(const struct instr_translater
*t
,
1283 struct ir3_compile_context
*ctx
,
1284 struct tgsi_full_instruction
*inst
)
1286 struct ir3_instruction
*instr
;
1287 struct tgsi_dst_register tmp_dst
;
1288 struct tgsi_src_register
*tmp_src
;
1289 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
1290 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1291 unsigned chan
= src
->SwizzleX
;
1293 compile_assert(ctx
, dst
->File
== TGSI_FILE_ADDRESS
);
1295 /* NOTE: we allocate a temporary from a flat register
1296 * namespace (ignoring half vs full). It turns out
1297 * not to really matter since registers get reassigned
1298 * later in ir3_ra which (hopefully!) can deal a bit
1299 * better with mixed half and full precision.
1301 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1303 /* cov.{u,f}{32,16}s16 Rtmp, Rsrc */
1304 instr
= instr_create(ctx
, 1, 0);
1305 instr
->cat1
.src_type
= (t
->tgsi_opc
== TGSI_OPCODE_ARL
) ?
1306 get_ftype(ctx
) : get_utype(ctx
);
1307 instr
->cat1
.dst_type
= TYPE_S16
;
1308 add_dst_reg(ctx
, instr
, &tmp_dst
, chan
)->flags
|= IR3_REG_HALF
;
1309 add_src_reg(ctx
, instr
, src
, chan
);
1311 /* shl.b Rtmp, Rtmp, 2 */
1312 instr
= instr_create(ctx
, 2, OPC_SHL_B
);
1313 add_dst_reg(ctx
, instr
, &tmp_dst
, chan
)->flags
|= IR3_REG_HALF
;
1314 add_src_reg(ctx
, instr
, tmp_src
, chan
)->flags
|= IR3_REG_HALF
;
1315 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 2;
1318 instr
= instr_create(ctx
, 1, 0);
1319 instr
->cat1
.src_type
= TYPE_S16
;
1320 instr
->cat1
.dst_type
= TYPE_S16
;
1321 add_dst_reg(ctx
, instr
, dst
, 0)->flags
|= IR3_REG_HALF
;
1322 add_src_reg(ctx
, instr
, tmp_src
, chan
)->flags
|= IR3_REG_HALF
;
1326 * texture fetch/sample instructions:
1332 unsigned src_wrmask
, flags
;
1335 struct target_info
{
1342 static const struct target_info tex_targets
[] = {
1343 [TGSI_TEXTURE_1D
] = { 1, 0, 0, 0 },
1344 [TGSI_TEXTURE_2D
] = { 2, 0, 0, 0 },
1345 [TGSI_TEXTURE_3D
] = { 3, 0, 0, 0 },
1346 [TGSI_TEXTURE_CUBE
] = { 3, 1, 0, 0 },
1347 [TGSI_TEXTURE_RECT
] = { 2, 0, 0, 0 },
1348 [TGSI_TEXTURE_SHADOW1D
] = { 1, 0, 0, 1 },
1349 [TGSI_TEXTURE_SHADOW2D
] = { 2, 0, 0, 1 },
1350 [TGSI_TEXTURE_SHADOWRECT
] = { 2, 0, 0, 1 },
1351 [TGSI_TEXTURE_1D_ARRAY
] = { 1, 0, 1, 0 },
1352 [TGSI_TEXTURE_2D_ARRAY
] = { 2, 0, 1, 0 },
1353 [TGSI_TEXTURE_SHADOW1D_ARRAY
] = { 1, 0, 1, 1 },
1354 [TGSI_TEXTURE_SHADOW2D_ARRAY
] = { 2, 0, 1, 1 },
1355 [TGSI_TEXTURE_SHADOWCUBE
] = { 3, 1, 0, 1 },
1356 [TGSI_TEXTURE_2D_MSAA
] = { 2, 0, 0, 0 },
1357 [TGSI_TEXTURE_2D_ARRAY_MSAA
] = { 2, 0, 1, 0 },
1358 [TGSI_TEXTURE_CUBE_ARRAY
] = { 3, 1, 1, 0 },
1359 [TGSI_TEXTURE_SHADOWCUBE_ARRAY
] = { 3, 1, 1, 1 },
1363 fill_tex_info(struct ir3_compile_context
*ctx
,
1364 struct tgsi_full_instruction
*inst
,
1365 struct tex_info
*info
)
1367 const struct target_info
*tgt
= &tex_targets
[inst
->Texture
.Texture
];
1370 info
->flags
|= IR3_INSTR_3D
;
1372 info
->flags
|= IR3_INSTR_A
;
1374 info
->flags
|= IR3_INSTR_S
;
1376 switch (inst
->Instruction
.Opcode
) {
1377 case TGSI_OPCODE_TXB
:
1378 case TGSI_OPCODE_TXB2
:
1379 case TGSI_OPCODE_TXL
:
1380 case TGSI_OPCODE_TXF
:
1383 case TGSI_OPCODE_TXP
:
1384 info
->flags
|= IR3_INSTR_P
;
1386 case TGSI_OPCODE_TEX
:
1387 case TGSI_OPCODE_TXD
:
1393 * lay out the first argument in the proper order:
1394 * - actual coordinates first
1396 * - shadow reference
1399 * bias/lod go into the second arg
1402 for (arg
= 0; arg
< tgt
->dims
; arg
++)
1403 info
->order
[arg
] = pos
++;
1405 info
->order
[pos
++] = -1;
1407 info
->order
[pos
++] = MAX2(arg
+ tgt
->array
, 2);
1409 info
->order
[pos
++] = arg
++;
1410 if (info
->flags
& IR3_INSTR_P
)
1411 info
->order
[pos
++] = 3;
1413 info
->src_wrmask
= (1 << pos
) - 1;
1415 for (; pos
< 4; pos
++)
1416 info
->order
[pos
] = -1;
/* Check that the source swizzle matches the expected relative channel
 * order (order[i] relative to channel 0); -1 entries are don't-care.
 */
static bool check_swiz(struct tgsi_src_register *src, const int8_t order[4])
{
	unsigned i;
	for (i = 1; (i < 4) && order[i] >= 0; i++)
		if (src_swiz(src, i) != (src_swiz(src, 0) + order[i]))
			return false;
	return true;
}
1430 static bool is_1d(unsigned tex
)
1432 return tex_targets
[tex
].dims
== 1;
1435 static struct tgsi_src_register
*
1436 get_tex_coord(struct ir3_compile_context
*ctx
,
1437 struct tgsi_full_instruction
*inst
,
1438 const struct tex_info
*tinf
)
1440 struct tgsi_src_register
*coord
= &inst
->Src
[0].Register
;
1441 struct ir3_instruction
*instr
;
1442 unsigned tex
= inst
->Texture
.Texture
;
1443 struct tgsi_dst_register tmp_dst
;
1444 struct tgsi_src_register
*tmp_src
;
1445 type_t type_mov
= get_ftype(ctx
);
1448 /* need to move things around: */
1449 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1451 for (j
= 0; j
< 4; j
++) {
1452 if (tinf
->order
[j
] < 0)
1454 instr
= instr_create(ctx
, 1, 0); /* mov */
1455 instr
->cat1
.src_type
= type_mov
;
1456 instr
->cat1
.dst_type
= type_mov
;
1457 add_dst_reg(ctx
, instr
, &tmp_dst
, j
);
1458 add_src_reg(ctx
, instr
, coord
,
1459 src_swiz(coord
, tinf
->order
[j
]));
1462 /* fix up .y coord: */
1464 struct ir3_register
*imm
;
1465 instr
= instr_create(ctx
, 1, 0); /* mov */
1466 instr
->cat1
.src_type
= type_mov
;
1467 instr
->cat1
.dst_type
= type_mov
;
1468 add_dst_reg(ctx
, instr
, &tmp_dst
, 1); /* .y */
1469 imm
= ir3_reg_create(instr
, 0, IR3_REG_IMMED
);
1470 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXF
)
1480 trans_samp(const struct instr_translater
*t
,
1481 struct ir3_compile_context
*ctx
,
1482 struct tgsi_full_instruction
*inst
)
1484 struct ir3_instruction
*instr
, *collect
;
1485 struct ir3_register
*reg
;
1486 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
1487 struct tgsi_src_register
*orig
, *coord
, *samp
, *offset
, *dpdx
, *dpdy
;
1488 struct tgsi_src_register zero
;
1489 const struct target_info
*tgt
= &tex_targets
[inst
->Texture
.Texture
];
1490 struct tex_info tinf
;
1493 memset(&tinf
, 0, sizeof(tinf
));
1494 fill_tex_info(ctx
, inst
, &tinf
);
1495 coord
= get_tex_coord(ctx
, inst
, &tinf
);
1496 get_immediate(ctx
, &zero
, 0);
1498 switch (inst
->Instruction
.Opcode
) {
1499 case TGSI_OPCODE_TXB2
:
1500 orig
= &inst
->Src
[1].Register
;
1501 samp
= &inst
->Src
[2].Register
;
1503 case TGSI_OPCODE_TXD
:
1504 orig
= &inst
->Src
[0].Register
;
1505 dpdx
= &inst
->Src
[1].Register
;
1506 dpdy
= &inst
->Src
[2].Register
;
1507 samp
= &inst
->Src
[3].Register
;
1508 if (is_rel_or_const(dpdx
))
1509 dpdx
= get_unconst(ctx
, dpdx
);
1510 if (is_rel_or_const(dpdy
))
1511 dpdy
= get_unconst(ctx
, dpdy
);
1514 orig
= &inst
->Src
[0].Register
;
1515 samp
= &inst
->Src
[1].Register
;
1518 if (tinf
.args
> 1 && is_rel_or_const(orig
))
1519 orig
= get_unconst(ctx
, orig
);
1521 /* scale up integer coords for TXF based on the LOD */
1522 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXF
) {
1523 struct tgsi_dst_register tmp_dst
;
1524 struct tgsi_src_register
*tmp_src
;
1525 type_t type_mov
= get_utype(ctx
);
1527 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1528 for (i
= 0; i
< tgt
->dims
; i
++) {
1529 instr
= instr_create(ctx
, 2, OPC_SHL_B
);
1530 add_dst_reg(ctx
, instr
, &tmp_dst
, i
);
1531 add_src_reg(ctx
, instr
, coord
, src_swiz(coord
, i
));
1532 add_src_reg(ctx
, instr
, orig
, orig
->SwizzleW
);
1534 if (tgt
->dims
< 2) {
1535 instr
= instr_create(ctx
, 1, 0);
1536 instr
->cat1
.src_type
= type_mov
;
1537 instr
->cat1
.dst_type
= type_mov
;
1538 add_dst_reg(ctx
, instr
, &tmp_dst
, i
);
1539 add_src_reg(ctx
, instr
, &zero
, 0);
1543 instr
= instr_create(ctx
, 1, 0);
1544 instr
->cat1
.src_type
= type_mov
;
1545 instr
->cat1
.dst_type
= type_mov
;
1546 add_dst_reg(ctx
, instr
, &tmp_dst
, i
);
1547 add_src_reg(ctx
, instr
, coord
, src_swiz(coord
, i
));
1552 if (inst
->Texture
.NumOffsets
) {
1553 struct tgsi_texture_offset
*tex_offset
= &inst
->TexOffsets
[0];
1554 struct tgsi_src_register offset_src
= {0};
1556 offset_src
.File
= tex_offset
->File
;
1557 offset_src
.Index
= tex_offset
->Index
;
1558 offset_src
.SwizzleX
= tex_offset
->SwizzleX
;
1559 offset_src
.SwizzleY
= tex_offset
->SwizzleY
;
1560 offset_src
.SwizzleZ
= tex_offset
->SwizzleZ
;
1561 offset
= get_unconst(ctx
, &offset_src
);
1562 tinf
.flags
|= IR3_INSTR_O
;
1565 instr
= instr_create(ctx
, 5, t
->opc
);
1566 if (ctx
->integer_s
& (1 << samp
->Index
))
1567 instr
->cat5
.type
= get_utype(ctx
);
1569 instr
->cat5
.type
= get_ftype(ctx
);
1570 instr
->cat5
.samp
= samp
->Index
;
1571 instr
->cat5
.tex
= samp
->Index
;
1572 instr
->flags
|= tinf
.flags
;
1574 add_dst_reg_wrmask(ctx
, instr
, dst
, 0, dst
->WriteMask
);
1576 reg
= ir3_reg_create(instr
, 0, IR3_REG_SSA
);
1578 collect
= ir3_instr_create2(ctx
->block
, -1, OPC_META_FI
, 12);
1579 ir3_reg_create(collect
, 0, 0);
1580 for (i
= 0; i
< 4; i
++) {
1581 if (tinf
.src_wrmask
& (1 << i
))
1582 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
),
1583 coord
, src_swiz(coord
, i
));
1584 else if (tinf
.src_wrmask
& ~((1 << i
) - 1))
1585 ir3_reg_create(collect
, 0, 0);
1588 /* Attach derivatives onto the end of the fan-in. Derivatives start after
1589 * the 4th argument, so make sure that fi is padded up to 4 first.
1591 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXD
) {
1592 while (collect
->regs_count
< 5)
1593 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
), &zero
, 0);
1594 for (i
= 0; i
< tgt
->dims
; i
++)
1595 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
), dpdx
, i
);
1597 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
), &zero
, 0);
1598 for (i
= 0; i
< tgt
->dims
; i
++)
1599 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
), dpdy
, i
);
1601 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
), &zero
, 0);
1602 tinf
.src_wrmask
|= ((1 << (2 * MAX2(tgt
->dims
, 2))) - 1) << 4;
1605 reg
->instr
= collect
;
1606 reg
->wrmask
= tinf
.src_wrmask
;
1608 /* The second argument contains the offsets, followed by the lod/bias
1609 * argument. This is constructed more manually due to the dynamic nature.
1611 if (inst
->Texture
.NumOffsets
== 0 && tinf
.args
== 1)
1614 reg
= ir3_reg_create(instr
, 0, IR3_REG_SSA
);
1616 collect
= ir3_instr_create2(ctx
->block
, -1, OPC_META_FI
, 5);
1617 ir3_reg_create(collect
, 0, 0);
1619 if (inst
->Texture
.NumOffsets
) {
1620 for (i
= 0; i
< tgt
->dims
; i
++)
1621 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
),
1624 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
), &zero
, 0);
1626 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
)
1627 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
),
1628 orig
, orig
->SwizzleX
);
1629 else if (tinf
.args
> 1)
1630 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
),
1631 orig
, orig
->SwizzleW
);
1633 reg
->instr
= collect
;
1634 reg
->wrmask
= (1 << (collect
->regs_count
- 1)) - 1;
1638 trans_txq(const struct instr_translater
*t
,
1639 struct ir3_compile_context
*ctx
,
1640 struct tgsi_full_instruction
*inst
)
1642 struct ir3_instruction
*instr
;
1643 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
1644 struct tgsi_src_register
*level
= &inst
->Src
[0].Register
;
1645 struct tgsi_src_register
*samp
= &inst
->Src
[1].Register
;
1646 const struct target_info
*tgt
= &tex_targets
[inst
->Texture
.Texture
];
1647 struct tex_info tinf
;
1649 memset(&tinf
, 0, sizeof(tinf
));
1650 fill_tex_info(ctx
, inst
, &tinf
);
1651 if (is_rel_or_const(level
))
1652 level
= get_unconst(ctx
, level
);
1654 instr
= instr_create(ctx
, 5, OPC_GETSIZE
);
1655 instr
->cat5
.type
= get_utype(ctx
);
1656 instr
->cat5
.samp
= samp
->Index
;
1657 instr
->cat5
.tex
= samp
->Index
;
1658 instr
->flags
|= tinf
.flags
;
1660 if (tgt
->array
&& (dst
->WriteMask
& (1 << tgt
->dims
))) {
1661 /* Array size actually ends up in .w rather than .z. This doesn't
1662 * matter for miplevel 0, but for higher mips the value in z is
1663 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
1664 * returned, which means that we have to add 1 to it for arrays.
1666 struct tgsi_dst_register tmp_dst
;
1667 struct tgsi_src_register
*tmp_src
;
1668 type_t type_mov
= get_utype(ctx
);
1670 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1671 add_dst_reg_wrmask(ctx
, instr
, &tmp_dst
, 0,
1672 dst
->WriteMask
| TGSI_WRITEMASK_W
);
1673 add_src_reg_wrmask(ctx
, instr
, level
, level
->SwizzleX
, 0x1);
1675 if (dst
->WriteMask
& TGSI_WRITEMASK_X
) {
1676 instr
= instr_create(ctx
, 1, 0);
1677 instr
->cat1
.src_type
= type_mov
;
1678 instr
->cat1
.dst_type
= type_mov
;
1679 add_dst_reg(ctx
, instr
, dst
, 0);
1680 add_src_reg(ctx
, instr
, tmp_src
, src_swiz(tmp_src
, 0));
1683 if (tgt
->dims
== 2) {
1684 if (dst
->WriteMask
& TGSI_WRITEMASK_Y
) {
1685 instr
= instr_create(ctx
, 1, 0);
1686 instr
->cat1
.src_type
= type_mov
;
1687 instr
->cat1
.dst_type
= type_mov
;
1688 add_dst_reg(ctx
, instr
, dst
, 1);
1689 add_src_reg(ctx
, instr
, tmp_src
, src_swiz(tmp_src
, 1));
1693 instr
= instr_create(ctx
, 2, OPC_ADD_U
);
1694 add_dst_reg(ctx
, instr
, dst
, tgt
->dims
);
1695 add_src_reg(ctx
, instr
, tmp_src
, src_swiz(tmp_src
, 3));
1696 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 1;
1698 add_dst_reg_wrmask(ctx
, instr
, dst
, 0, dst
->WriteMask
);
1699 add_src_reg_wrmask(ctx
, instr
, level
, level
->SwizzleX
, 0x1);
1702 if (dst
->WriteMask
& TGSI_WRITEMASK_W
) {
1703 /* The # of levels comes from getinfo.z. We need to add 1 to it, since
1704 * the value in TEX_CONST_0 is zero-based.
1706 struct tgsi_dst_register tmp_dst
;
1707 struct tgsi_src_register
*tmp_src
;
1709 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1710 instr
= instr_create(ctx
, 5, OPC_GETINFO
);
1711 instr
->cat5
.type
= get_utype(ctx
);
1712 instr
->cat5
.samp
= samp
->Index
;
1713 instr
->cat5
.tex
= samp
->Index
;
1714 add_dst_reg_wrmask(ctx
, instr
, &tmp_dst
, 0, TGSI_WRITEMASK_Z
);
1716 instr
= instr_create(ctx
, 2, OPC_ADD_U
);
1717 add_dst_reg(ctx
, instr
, dst
, 3);
1718 add_src_reg(ctx
, instr
, tmp_src
, src_swiz(tmp_src
, 2));
1719 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 1;
1725 trans_deriv(const struct instr_translater
*t
,
1726 struct ir3_compile_context
*ctx
,
1727 struct tgsi_full_instruction
*inst
)
1729 struct ir3_instruction
*instr
;
1730 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
1731 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1732 static const int8_t order
[4] = {0, 1, 2, 3};
1734 if (!check_swiz(src
, order
)) {
1735 struct tgsi_dst_register tmp_dst
;
1736 struct tgsi_src_register
*tmp_src
;
1738 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1739 create_mov(ctx
, &tmp_dst
, src
);
1744 /* This might be a workaround for hw bug? Blob compiler always
1745 * seems to work two components at a time for dsy/dsx. It does
1746 * actually seem to work in some cases (or at least some piglit
1747 * tests) for four components at a time. But seems more reliable
1748 * to split this into two instructions like the blob compiler
1752 instr
= instr_create(ctx
, 5, t
->opc
);
1753 instr
->cat5
.type
= get_ftype(ctx
);
1754 add_dst_reg_wrmask(ctx
, instr
, dst
, 0, dst
->WriteMask
& 0x3);
1755 add_src_reg_wrmask(ctx
, instr
, src
, 0, dst
->WriteMask
& 0x3);
1757 instr
= instr_create(ctx
, 5, t
->opc
);
1758 instr
->cat5
.type
= get_ftype(ctx
);
1759 add_dst_reg_wrmask(ctx
, instr
, dst
, 2, (dst
->WriteMask
>> 2) & 0x3);
1760 add_src_reg_wrmask(ctx
, instr
, src
, 2, (dst
->WriteMask
>> 2) & 0x3);
1764 * SEQ(a,b) = (a == b) ? 1.0 : 0.0
1765 * cmps.f.eq tmp0, a, b
1766 * cov.u16f16 dst, tmp0
1768 * SNE(a,b) = (a != b) ? 1.0 : 0.0
1769 * cmps.f.ne tmp0, a, b
1770 * cov.u16f16 dst, tmp0
1772 * SGE(a,b) = (a >= b) ? 1.0 : 0.0
1773 * cmps.f.ge tmp0, a, b
1774 * cov.u16f16 dst, tmp0
1776 * SLE(a,b) = (a <= b) ? 1.0 : 0.0
1777 * cmps.f.le tmp0, a, b
1778 * cov.u16f16 dst, tmp0
1780 * SGT(a,b) = (a > b) ? 1.0 : 0.0
1781 * cmps.f.gt tmp0, a, b
1782 * cov.u16f16 dst, tmp0
1784 * SLT(a,b) = (a < b) ? 1.0 : 0.0
1785 * cmps.f.lt tmp0, a, b
1786 * cov.u16f16 dst, tmp0
1788 * CMP(a,b,c) = (a < 0.0) ? b : c
1789 * cmps.f.lt tmp0, a, {0.0}
1790 * sel.b16 dst, b, tmp0, c
1793 trans_cmp(const struct instr_translater
*t
,
1794 struct ir3_compile_context
*ctx
,
1795 struct tgsi_full_instruction
*inst
)
1797 struct ir3_instruction
*instr
;
1798 struct tgsi_dst_register tmp_dst
;
1799 struct tgsi_src_register
*tmp_src
;
1800 struct tgsi_src_register constval0
;
1801 /* final instruction for CMP() uses orig src1 and src2: */
1802 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1803 struct tgsi_src_register
*a0
, *a1
, *a2
;
1806 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1808 a0
= &inst
->Src
[0].Register
; /* a */
1809 a1
= &inst
->Src
[1].Register
; /* b */
1811 switch (t
->tgsi_opc
) {
1812 case TGSI_OPCODE_SEQ
:
1813 case TGSI_OPCODE_FSEQ
:
1814 condition
= IR3_COND_EQ
;
1816 case TGSI_OPCODE_SNE
:
1817 case TGSI_OPCODE_FSNE
:
1818 condition
= IR3_COND_NE
;
1820 case TGSI_OPCODE_SGE
:
1821 case TGSI_OPCODE_FSGE
:
1822 condition
= IR3_COND_GE
;
1824 case TGSI_OPCODE_SLT
:
1825 case TGSI_OPCODE_FSLT
:
1826 condition
= IR3_COND_LT
;
1828 case TGSI_OPCODE_SLE
:
1829 condition
= IR3_COND_LE
;
1831 case TGSI_OPCODE_SGT
:
1832 condition
= IR3_COND_GT
;
1834 case TGSI_OPCODE_CMP
:
1835 get_immediate(ctx
, &constval0
, fui(0.0));
1836 a0
= &inst
->Src
[0].Register
; /* a */
1837 a1
= &constval0
; /* {0.0} */
1838 condition
= IR3_COND_LT
;
1841 compile_assert(ctx
, 0);
1845 if (is_const(a0
) && is_const(a1
))
1846 a0
= get_unconst(ctx
, a0
);
1848 /* cmps.f.<cond> tmp, a0, a1 */
1849 instr
= instr_create(ctx
, 2, OPC_CMPS_F
);
1850 instr
->cat2
.condition
= condition
;
1851 vectorize(ctx
, instr
, &tmp_dst
, 2, a0
, 0, a1
, 0);
1853 switch (t
->tgsi_opc
) {
1854 case TGSI_OPCODE_SEQ
:
1855 case TGSI_OPCODE_SGE
:
1856 case TGSI_OPCODE_SLE
:
1857 case TGSI_OPCODE_SNE
:
1858 case TGSI_OPCODE_SGT
:
1859 case TGSI_OPCODE_SLT
:
1860 /* cov.u16f16 dst, tmp0 */
1861 instr
= instr_create(ctx
, 1, 0);
1862 instr
->cat1
.src_type
= get_utype(ctx
);
1863 instr
->cat1
.dst_type
= get_ftype(ctx
);
1864 vectorize(ctx
, instr
, dst
, 1, tmp_src
, 0);
1866 case TGSI_OPCODE_FSEQ
:
1867 case TGSI_OPCODE_FSGE
:
1868 case TGSI_OPCODE_FSNE
:
1869 case TGSI_OPCODE_FSLT
:
1870 /* absneg.s dst, (neg)tmp0 */
1871 instr
= instr_create(ctx
, 2, OPC_ABSNEG_S
);
1872 vectorize(ctx
, instr
, dst
, 1, tmp_src
, IR3_REG_NEGATE
);
1874 case TGSI_OPCODE_CMP
:
1875 a1
= &inst
->Src
[1].Register
;
1876 a2
= &inst
->Src
[2].Register
;
1877 /* sel.{b32,b16} dst, src2, tmp, src1 */
1878 instr
= instr_create(ctx
, 3, OPC_SEL_B32
);
1879 vectorize(ctx
, instr
, dst
, 3, a1
, 0, tmp_src
, 0, a2
, 0);
1884 put_dst(ctx
, inst
, dst
);
1888 * USNE(a,b) = (a != b) ? ~0 : 0
1889 * cmps.u32.ne dst, a, b
1891 * USEQ(a,b) = (a == b) ? ~0 : 0
1892 * cmps.u32.eq dst, a, b
1894 * ISGE(a,b) = (a > b) ? ~0 : 0
1895 * cmps.s32.ge dst, a, b
1897 * USGE(a,b) = (a > b) ? ~0 : 0
1898 * cmps.u32.ge dst, a, b
1900 * ISLT(a,b) = (a < b) ? ~0 : 0
1901 * cmps.s32.lt dst, a, b
1903 * USLT(a,b) = (a < b) ? ~0 : 0
1904 * cmps.u32.lt dst, a, b
1908 trans_icmp(const struct instr_translater
*t
,
1909 struct ir3_compile_context
*ctx
,
1910 struct tgsi_full_instruction
*inst
)
1912 struct ir3_instruction
*instr
;
1913 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1914 struct tgsi_dst_register tmp_dst
;
1915 struct tgsi_src_register
*tmp_src
;
1916 struct tgsi_src_register
*a0
, *a1
;
1919 a0
= &inst
->Src
[0].Register
; /* a */
1920 a1
= &inst
->Src
[1].Register
; /* b */
1922 switch (t
->tgsi_opc
) {
1923 case TGSI_OPCODE_USNE
:
1924 condition
= IR3_COND_NE
;
1926 case TGSI_OPCODE_USEQ
:
1927 condition
= IR3_COND_EQ
;
1929 case TGSI_OPCODE_ISGE
:
1930 case TGSI_OPCODE_USGE
:
1931 condition
= IR3_COND_GE
;
1933 case TGSI_OPCODE_ISLT
:
1934 case TGSI_OPCODE_USLT
:
1935 condition
= IR3_COND_LT
;
1939 compile_assert(ctx
, 0);
1943 if (is_const(a0
) && is_const(a1
))
1944 a0
= get_unconst(ctx
, a0
);
1946 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1947 /* cmps.{u32,s32}.<cond> tmp, a0, a1 */
1948 instr
= instr_create(ctx
, 2, t
->opc
);
1949 instr
->cat2
.condition
= condition
;
1950 vectorize(ctx
, instr
, &tmp_dst
, 2, a0
, 0, a1
, 0);
1952 /* absneg.s dst, (neg)tmp */
1953 instr
= instr_create(ctx
, 2, OPC_ABSNEG_S
);
1954 vectorize(ctx
, instr
, dst
, 1, tmp_src
, IR3_REG_NEGATE
);
1956 put_dst(ctx
, inst
, dst
);
1960 * UCMP(a,b,c) = a ? b : c
1961 * sel.b16 dst, b, a, c
1964 trans_ucmp(const struct instr_translater
*t
,
1965 struct ir3_compile_context
*ctx
,
1966 struct tgsi_full_instruction
*inst
)
1968 struct ir3_instruction
*instr
;
1969 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1970 struct tgsi_src_register
*a0
, *a1
, *a2
;
1972 a0
= &inst
->Src
[0].Register
; /* a */
1973 a1
= &inst
->Src
[1].Register
; /* b */
1974 a2
= &inst
->Src
[2].Register
; /* c */
1976 if (is_rel_or_const(a0
))
1977 a0
= get_unconst(ctx
, a0
);
1979 /* sel.{b32,b16} dst, b, a, c */
1980 instr
= instr_create(ctx
, 3, OPC_SEL_B32
);
1981 vectorize(ctx
, instr
, dst
, 3, a1
, 0, a0
, 0, a2
, 0);
1982 put_dst(ctx
, inst
, dst
);
1986 * ISSG(a) = a < 0 ? -1 : a > 0 ? 1 : 0
1987 * cmps.s.lt tmp_neg, a, 0 # 1 if a is negative
1988 * cmps.s.gt tmp_pos, a, 0 # 1 if a is positive
1989 * sub.u dst, tmp_pos, tmp_neg
1992 trans_issg(const struct instr_translater
*t
,
1993 struct ir3_compile_context
*ctx
,
1994 struct tgsi_full_instruction
*inst
)
1996 struct ir3_instruction
*instr
;
1997 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1998 struct tgsi_src_register
*a
= &inst
->Src
[0].Register
;
1999 struct tgsi_dst_register neg_dst
, pos_dst
;
2000 struct tgsi_src_register
*neg_src
, *pos_src
;
2002 neg_src
= get_internal_temp(ctx
, &neg_dst
);
2003 pos_src
= get_internal_temp(ctx
, &pos_dst
);
2005 /* cmps.s.lt neg, a, 0 */
2006 instr
= instr_create(ctx
, 2, OPC_CMPS_S
);
2007 instr
->cat2
.condition
= IR3_COND_LT
;
2008 vectorize(ctx
, instr
, &neg_dst
, 2, a
, 0, 0, IR3_REG_IMMED
);
2010 /* cmps.s.gt pos, a, 0 */
2011 instr
= instr_create(ctx
, 2, OPC_CMPS_S
);
2012 instr
->cat2
.condition
= IR3_COND_GT
;
2013 vectorize(ctx
, instr
, &pos_dst
, 2, a
, 0, 0, IR3_REG_IMMED
);
2015 /* sub.u dst, pos, neg */
2016 instr
= instr_create(ctx
, 2, OPC_SUB_U
);
2017 vectorize(ctx
, instr
, dst
, 2, pos_src
, 0, neg_src
, 0);
2019 put_dst(ctx
, inst
, dst
);
2025 * Conditional / Flow control
2029 push_branch(struct ir3_compile_context
*ctx
, bool inv
,
2030 struct ir3_instruction
*instr
, struct ir3_instruction
*cond
)
2032 unsigned int idx
= ctx
->branch_count
++;
2033 compile_assert(ctx
, idx
< ARRAY_SIZE(ctx
->branch
));
2034 ctx
->branch
[idx
].instr
= instr
;
2035 ctx
->branch
[idx
].inv
= inv
;
2036 /* else side of branch has same condition: */
2038 ctx
->branch
[idx
].cond
= cond
;
2041 static struct ir3_instruction
*
2042 pop_branch(struct ir3_compile_context
*ctx
)
2044 unsigned int idx
= --ctx
->branch_count
;
2045 return ctx
->branch
[idx
].instr
;
2049 trans_if(const struct instr_translater
*t
,
2050 struct ir3_compile_context
*ctx
,
2051 struct tgsi_full_instruction
*inst
)
2053 struct ir3_instruction
*instr
, *cond
;
2054 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
2055 struct tgsi_dst_register tmp_dst
;
2056 struct tgsi_src_register
*tmp_src
;
2057 struct tgsi_src_register constval
;
2059 get_immediate(ctx
, &constval
, fui(0.0));
2060 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
2063 src
= get_unconst(ctx
, src
);
2065 /* cmps.{f,u}.ne tmp0, b, {0.0} */
2066 instr
= instr_create(ctx
, 2, t
->opc
);
2067 add_dst_reg(ctx
, instr
, &tmp_dst
, 0);
2068 add_src_reg(ctx
, instr
, src
, src
->SwizzleX
);
2069 add_src_reg(ctx
, instr
, &constval
, constval
.SwizzleX
);
2070 instr
->cat2
.condition
= IR3_COND_NE
;
2072 compile_assert(ctx
, instr
->regs
[1]->flags
& IR3_REG_SSA
); /* because get_unconst() */
2073 cond
= instr
->regs
[1]->instr
;
2075 /* meta:flow tmp0 */
2076 instr
= instr_create(ctx
, -1, OPC_META_FLOW
);
2077 ir3_reg_create(instr
, 0, 0); /* dummy dst */
2078 add_src_reg(ctx
, instr
, tmp_src
, TGSI_SWIZZLE_X
);
2080 push_branch(ctx
, false, instr
, cond
);
2081 instr
->flow
.if_block
= push_block(ctx
);
2085 trans_else(const struct instr_translater
*t
,
2086 struct ir3_compile_context
*ctx
,
2087 struct tgsi_full_instruction
*inst
)
2089 struct ir3_instruction
*instr
;
2093 instr
= pop_branch(ctx
);
2095 compile_assert(ctx
, (instr
->category
== -1) &&
2096 (instr
->opc
== OPC_META_FLOW
));
2098 push_branch(ctx
, true, instr
, NULL
);
2099 instr
->flow
.else_block
= push_block(ctx
);
2102 static struct ir3_instruction
*
2103 find_temporary(struct ir3_block
*block
, unsigned n
)
2105 if (block
->parent
&& !block
->temporaries
[n
])
2106 return find_temporary(block
->parent
, n
);
2107 return block
->temporaries
[n
];
2110 static struct ir3_instruction
*
2111 find_output(struct ir3_block
*block
, unsigned n
)
2113 if (block
->parent
&& !block
->outputs
[n
])
2114 return find_output(block
->parent
, n
);
2115 return block
->outputs
[n
];
2118 static struct ir3_instruction
*
2119 create_phi(struct ir3_compile_context
*ctx
, struct ir3_instruction
*cond
,
2120 struct ir3_instruction
*a
, struct ir3_instruction
*b
)
2122 struct ir3_instruction
*phi
;
2124 compile_assert(ctx
, cond
);
2126 /* Either side of the condition could be null.. which
2127 * indicates a variable written on only one side of the
2128 * branch. Normally this should only be variables not
2129 * used outside of that side of the branch. So we could
2130 * just 'return a ? a : b;' in that case. But for better
2131 * defined undefined behavior we just stick in imm{0.0}.
2132 * In the common case of a value only used within the
2133 * one side of the branch, the PHI instruction will not
2137 a
= create_immed(ctx
, 0.0);
2139 b
= create_immed(ctx
, 0.0);
2141 phi
= instr_create(ctx
, -1, OPC_META_PHI
);
2142 ir3_reg_create(phi
, 0, 0); /* dummy dst */
2143 ir3_reg_create(phi
, 0, IR3_REG_SSA
)->instr
= cond
;
2144 ir3_reg_create(phi
, 0, IR3_REG_SSA
)->instr
= a
;
2145 ir3_reg_create(phi
, 0, IR3_REG_SSA
)->instr
= b
;
2151 trans_endif(const struct instr_translater
*t
,
2152 struct ir3_compile_context
*ctx
,
2153 struct tgsi_full_instruction
*inst
)
2155 struct ir3_instruction
*instr
;
2156 struct ir3_block
*ifb
, *elseb
;
2157 struct ir3_instruction
**ifout
, **elseout
;
2158 unsigned i
, ifnout
= 0, elsenout
= 0;
2162 instr
= pop_branch(ctx
);
2164 compile_assert(ctx
, (instr
->category
== -1) &&
2165 (instr
->opc
== OPC_META_FLOW
));
2167 ifb
= instr
->flow
.if_block
;
2168 elseb
= instr
->flow
.else_block
;
2169 /* if there is no else block, the parent block is used for the
2170 * branch-not-taken src of the PHI instructions:
2173 elseb
= ifb
->parent
;
2175 /* worst case sizes: */
2176 ifnout
= ifb
->ntemporaries
+ ifb
->noutputs
;
2177 elsenout
= elseb
->ntemporaries
+ elseb
->noutputs
;
2179 ifout
= ir3_alloc(ctx
->ir
, sizeof(ifb
->outputs
[0]) * ifnout
);
2180 if (elseb
!= ifb
->parent
)
2181 elseout
= ir3_alloc(ctx
->ir
, sizeof(ifb
->outputs
[0]) * elsenout
);
2186 /* generate PHI instructions for any temporaries written: */
2187 for (i
= 0; i
< ifb
->ntemporaries
; i
++) {
2188 struct ir3_instruction
*a
= ifb
->temporaries
[i
];
2189 struct ir3_instruction
*b
= elseb
->temporaries
[i
];
2191 /* if temporary written in if-block, or if else block
2192 * is present and temporary written in else-block:
2194 if (a
|| ((elseb
!= ifb
->parent
) && b
)) {
2195 struct ir3_instruction
*phi
;
2197 /* if only written on one side, find the closest
2198 * enclosing update on other side:
2201 a
= find_temporary(ifb
, i
);
2203 b
= find_temporary(elseb
, i
);
2206 a
= create_output(ifb
, a
, ifnout
++);
2208 if (elseb
!= ifb
->parent
) {
2209 elseout
[elsenout
] = b
;
2210 b
= create_output(elseb
, b
, elsenout
++);
2213 phi
= create_phi(ctx
, instr
, a
, b
);
2214 ctx
->block
->temporaries
[i
] = phi
;
2218 compile_assert(ctx
, ifb
->noutputs
== elseb
->noutputs
);
2220 /* .. and any outputs written: */
2221 for (i
= 0; i
< ifb
->noutputs
; i
++) {
2222 struct ir3_instruction
*a
= ifb
->outputs
[i
];
2223 struct ir3_instruction
*b
= elseb
->outputs
[i
];
2225 /* if output written in if-block, or if else block
2226 * is present and output written in else-block:
2228 if (a
|| ((elseb
!= ifb
->parent
) && b
)) {
2229 struct ir3_instruction
*phi
;
2231 /* if only written on one side, find the closest
2232 * enclosing update on other side:
2235 a
= find_output(ifb
, i
);
2237 b
= find_output(elseb
, i
);
2240 a
= create_output(ifb
, a
, ifnout
++);
2242 if (elseb
!= ifb
->parent
) {
2243 elseout
[elsenout
] = b
;
2244 b
= create_output(elseb
, b
, elsenout
++);
2247 phi
= create_phi(ctx
, instr
, a
, b
);
2248 ctx
->block
->outputs
[i
] = phi
;
2252 ifb
->noutputs
= ifnout
;
2253 ifb
->outputs
= ifout
;
2255 if (elseb
!= ifb
->parent
) {
2256 elseb
->noutputs
= elsenout
;
2257 elseb
->outputs
= elseout
;
2260 // TODO maybe we want to compact block->inputs?
2268 trans_kill(const struct instr_translater
*t
,
2269 struct ir3_compile_context
*ctx
,
2270 struct tgsi_full_instruction
*inst
)
2272 struct ir3_instruction
*instr
, *immed
, *cond
= NULL
;
2275 /* unconditional kill, use enclosing if condition: */
2276 if (ctx
->branch_count
> 0) {
2277 unsigned int idx
= ctx
->branch_count
- 1;
2278 cond
= ctx
->branch
[idx
].cond
;
2279 inv
= ctx
->branch
[idx
].inv
;
2281 cond
= create_immed(ctx
, 1.0);
2284 compile_assert(ctx
, cond
);
2286 immed
= create_immed(ctx
, 0.0);
2288 /* cmps.f.ne p0.x, cond, {0.0} */
2289 instr
= instr_create(ctx
, 2, OPC_CMPS_F
);
2290 instr
->cat2
.condition
= IR3_COND_NE
;
2291 ir3_reg_create(instr
, regid(REG_P0
, 0), 0);
2292 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= cond
;
2293 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= immed
;
2297 instr
= instr_create(ctx
, 0, OPC_KILL
);
2298 instr
->cat0
.inv
= inv
;
2299 ir3_reg_create(instr
, 0, 0); /* dummy dst */
2300 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= cond
;
2302 ctx
->kill
[ctx
->kill_count
++] = instr
;
2304 ctx
->so
->has_kill
= true;
2312 trans_killif(const struct instr_translater
*t
,
2313 struct ir3_compile_context
*ctx
,
2314 struct tgsi_full_instruction
*inst
)
2316 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
2317 struct ir3_instruction
*instr
, *immed
, *cond
= NULL
;
2320 immed
= create_immed(ctx
, 0.0);
2322 /* cmps.f.ne p0.x, cond, {0.0} */
2323 instr
= instr_create(ctx
, 2, OPC_CMPS_F
);
2324 instr
->cat2
.condition
= IR3_COND_NE
;
2325 ir3_reg_create(instr
, regid(REG_P0
, 0), 0);
2326 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= immed
;
2327 add_src_reg(ctx
, instr
, src
, src
->SwizzleX
);
2332 instr
= instr_create(ctx
, 0, OPC_KILL
);
2333 instr
->cat0
.inv
= inv
;
2334 ir3_reg_create(instr
, 0, 0); /* dummy dst */
2335 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= cond
;
2337 ctx
->kill
[ctx
->kill_count
++] = instr
;
2339 ctx
->so
->has_kill
= true;
2343 * I2F / U2F / F2I / F2U
2347 trans_cov(const struct instr_translater
*t
,
2348 struct ir3_compile_context
*ctx
,
2349 struct tgsi_full_instruction
*inst
)
2351 struct ir3_instruction
*instr
;
2352 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
2353 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
2355 // cov.f32s32 dst, tmp0 /
2356 instr
= instr_create(ctx
, 1, 0);
2357 switch (t
->tgsi_opc
) {
2358 case TGSI_OPCODE_U2F
:
2359 instr
->cat1
.src_type
= TYPE_U32
;
2360 instr
->cat1
.dst_type
= TYPE_F32
;
2362 case TGSI_OPCODE_I2F
:
2363 instr
->cat1
.src_type
= TYPE_S32
;
2364 instr
->cat1
.dst_type
= TYPE_F32
;
2366 case TGSI_OPCODE_F2U
:
2367 instr
->cat1
.src_type
= TYPE_F32
;
2368 instr
->cat1
.dst_type
= TYPE_U32
;
2370 case TGSI_OPCODE_F2I
:
2371 instr
->cat1
.src_type
= TYPE_F32
;
2372 instr
->cat1
.dst_type
= TYPE_S32
;
2376 vectorize(ctx
, instr
, dst
, 1, src
, 0);
2377 put_dst(ctx
, inst
, dst
);
2383 * There is no 32-bit multiply instruction, so splitting a and b into high and
2384 * low components, we get that
2386 * dst = al * bl + ah * bl << 16 + al * bh << 16
2388 * mull.u tmp0, a, b (mul low, i.e. al * bl)
2389 * madsh.m16 tmp1, a, b, tmp0 (mul-add shift high mix, i.e. ah * bl << 16)
2390 * madsh.m16 dst, b, a, tmp1 (i.e. al * bh << 16)
2392 * For UMAD, add in the extra argument after mull.u.
2395 trans_umul(const struct instr_translater
*t
,
2396 struct ir3_compile_context
*ctx
,
2397 struct tgsi_full_instruction
*inst
)
2399 struct ir3_instruction
*instr
;
2400 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
2401 struct tgsi_src_register
*a
= &inst
->Src
[0].Register
;
2402 struct tgsi_src_register
*b
= &inst
->Src
[1].Register
;
2404 struct tgsi_dst_register tmp0_dst
, tmp1_dst
;
2405 struct tgsi_src_register
*tmp0_src
, *tmp1_src
;
2407 tmp0_src
= get_internal_temp(ctx
, &tmp0_dst
);
2408 tmp1_src
= get_internal_temp(ctx
, &tmp1_dst
);
2410 if (is_rel_or_const(a
))
2411 a
= get_unconst(ctx
, a
);
2412 if (is_rel_or_const(b
))
2413 b
= get_unconst(ctx
, b
);
2415 /* mull.u tmp0, a, b */
2416 instr
= instr_create(ctx
, 2, OPC_MULL_U
);
2417 vectorize(ctx
, instr
, &tmp0_dst
, 2, a
, 0, b
, 0);
2419 if (t
->tgsi_opc
== TGSI_OPCODE_UMAD
) {
2420 struct tgsi_src_register
*c
= &inst
->Src
[2].Register
;
2422 /* add.u tmp0, tmp0, c */
2423 instr
= instr_create(ctx
, 2, OPC_ADD_U
);
2424 vectorize(ctx
, instr
, &tmp0_dst
, 2, tmp0_src
, 0, c
, 0);
2427 /* madsh.m16 tmp1, a, b, tmp0 */
2428 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
2429 vectorize(ctx
, instr
, &tmp1_dst
, 3, a
, 0, b
, 0, tmp0_src
, 0);
2431 /* madsh.m16 dst, b, a, tmp1 */
2432 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
2433 vectorize(ctx
, instr
, dst
, 3, b
, 0, a
, 0, tmp1_src
, 0);
2434 put_dst(ctx
, inst
, dst
);
2438 * IDIV / UDIV / MOD / UMOD
2440 * See NV50LegalizeSSA::handleDIV for the origin of this implementation. For
2441 * MOD/UMOD, it becomes a - [IU]DIV(a, modulus) * modulus.
2444 trans_idiv(const struct instr_translater
*t
,
2445 struct ir3_compile_context
*ctx
,
2446 struct tgsi_full_instruction
*inst
)
2448 struct ir3_instruction
*instr
;
2449 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
), *premod_dst
= dst
;
2450 struct tgsi_src_register
*a
= &inst
->Src
[0].Register
;
2451 struct tgsi_src_register
*b
= &inst
->Src
[1].Register
;
2453 struct tgsi_dst_register af_dst
, bf_dst
, q_dst
, r_dst
, a_dst
, b_dst
;
2454 struct tgsi_src_register
*af_src
, *bf_src
, *q_src
, *r_src
, *a_src
, *b_src
;
2456 struct tgsi_src_register negative_2
, thirty_one
;
2459 if (t
->tgsi_opc
== TGSI_OPCODE_IDIV
|| t
->tgsi_opc
== TGSI_OPCODE_MOD
)
2460 src_type
= get_stype(ctx
);
2462 src_type
= get_utype(ctx
);
2464 af_src
= get_internal_temp(ctx
, &af_dst
);
2465 bf_src
= get_internal_temp(ctx
, &bf_dst
);
2466 q_src
= get_internal_temp(ctx
, &q_dst
);
2467 r_src
= get_internal_temp(ctx
, &r_dst
);
2468 a_src
= get_internal_temp(ctx
, &a_dst
);
2469 b_src
= get_internal_temp(ctx
, &b_dst
);
2471 get_immediate(ctx
, &negative_2
, -2);
2472 get_immediate(ctx
, &thirty_one
, 31);
2474 if (t
->tgsi_opc
== TGSI_OPCODE_MOD
|| t
->tgsi_opc
== TGSI_OPCODE_UMOD
)
2475 premod_dst
= &q_dst
;
2477 /* cov.[us]32f32 af, numerator */
2478 instr
= instr_create(ctx
, 1, 0);
2479 instr
->cat1
.src_type
= src_type
;
2480 instr
->cat1
.dst_type
= get_ftype(ctx
);
2481 vectorize(ctx
, instr
, &af_dst
, 1, a
, 0);
2483 /* cov.[us]32f32 bf, denominator */
2484 instr
= instr_create(ctx
, 1, 0);
2485 instr
->cat1
.src_type
= src_type
;
2486 instr
->cat1
.dst_type
= get_ftype(ctx
);
2487 vectorize(ctx
, instr
, &bf_dst
, 1, b
, 0);
2489 /* Get the absolute values for IDIV */
2490 if (type_sint(src_type
)) {
2491 /* absneg.f af, (abs)af */
2492 instr
= instr_create(ctx
, 2, OPC_ABSNEG_F
);
2493 vectorize(ctx
, instr
, &af_dst
, 1, af_src
, IR3_REG_ABS
);
2495 /* absneg.f bf, (abs)bf */
2496 instr
= instr_create(ctx
, 2, OPC_ABSNEG_F
);
2497 vectorize(ctx
, instr
, &bf_dst
, 1, bf_src
, IR3_REG_ABS
);
2499 /* absneg.s a, (abs)numerator */
2500 instr
= instr_create(ctx
, 2, OPC_ABSNEG_S
);
2501 vectorize(ctx
, instr
, &a_dst
, 1, a
, IR3_REG_ABS
);
2503 /* absneg.s b, (abs)denominator */
2504 instr
= instr_create(ctx
, 2, OPC_ABSNEG_S
);
2505 vectorize(ctx
, instr
, &b_dst
, 1, b
, IR3_REG_ABS
);
2507 /* mov.u32u32 a, numerator */
2508 instr
= instr_create(ctx
, 1, 0);
2509 instr
->cat1
.src_type
= src_type
;
2510 instr
->cat1
.dst_type
= src_type
;
2511 vectorize(ctx
, instr
, &a_dst
, 1, a
, 0);
2513 /* mov.u32u32 b, denominator */
2514 instr
= instr_create(ctx
, 1, 0);
2515 instr
->cat1
.src_type
= src_type
;
2516 instr
->cat1
.dst_type
= src_type
;
2517 vectorize(ctx
, instr
, &b_dst
, 1, b
, 0);
2521 instr
= instr_create(ctx
, 4, OPC_RCP
);
2522 vectorize(ctx
, instr
, &bf_dst
, 1, bf_src
, 0);
2524 /* That's right, subtract 2 as an integer from the float */
2525 /* add.u bf, bf, -2 */
2526 instr
= instr_create(ctx
, 2, OPC_ADD_U
);
2527 vectorize(ctx
, instr
, &bf_dst
, 2, bf_src
, 0, &negative_2
, 0);
2529 /* mul.f q, af, bf */
2530 instr
= instr_create(ctx
, 2, OPC_MUL_F
);
2531 vectorize(ctx
, instr
, &q_dst
, 2, af_src
, 0, bf_src
, 0);
2533 /* cov.f32[us]32 q, q */
2534 instr
= instr_create(ctx
, 1, 0);
2535 instr
->cat1
.src_type
= get_ftype(ctx
);
2536 instr
->cat1
.dst_type
= src_type
;
2537 vectorize(ctx
, instr
, &q_dst
, 1, q_src
, 0);
2539 /* integer multiply q by b */
2540 /* mull.u r, q, b */
2541 instr
= instr_create(ctx
, 2, OPC_MULL_U
);
2542 vectorize(ctx
, instr
, &r_dst
, 2, q_src
, 0, b_src
, 0);
2544 /* madsh.m16 r, q, b, r */
2545 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
2546 vectorize(ctx
, instr
, &r_dst
, 3, q_src
, 0, b_src
, 0, r_src
, 0);
2548 /* madsh.m16, r, b, q, r */
2549 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
2550 vectorize(ctx
, instr
, &r_dst
, 3, b_src
, 0, q_src
, 0, r_src
, 0);
2553 instr
= instr_create(ctx
, 2, OPC_SUB_U
);
2554 vectorize(ctx
, instr
, &r_dst
, 2, a_src
, 0, r_src
, 0);
2556 /* cov.u32f32, r, r */
2557 instr
= instr_create(ctx
, 1, 0);
2558 instr
->cat1
.src_type
= get_utype(ctx
);
2559 instr
->cat1
.dst_type
= get_ftype(ctx
);
2560 vectorize(ctx
, instr
, &r_dst
, 1, r_src
, 0);
2562 /* mul.f r, r, bf */
2563 instr
= instr_create(ctx
, 2, OPC_MUL_F
);
2564 vectorize(ctx
, instr
, &r_dst
, 2, r_src
, 0, bf_src
, 0);
2566 /* cov.f32u32 r, r */
2567 instr
= instr_create(ctx
, 1, 0);
2568 instr
->cat1
.src_type
= get_ftype(ctx
);
2569 instr
->cat1
.dst_type
= get_utype(ctx
);
2570 vectorize(ctx
, instr
, &r_dst
, 1, r_src
, 0);
2573 instr
= instr_create(ctx
, 2, OPC_ADD_U
);
2574 vectorize(ctx
, instr
, &q_dst
, 2, q_src
, 0, r_src
, 0);
2576 /* mull.u r, q, b */
2577 instr
= instr_create(ctx
, 2, OPC_MULL_U
);
2578 vectorize(ctx
, instr
, &r_dst
, 2, q_src
, 0, b_src
, 0);
2580 /* madsh.m16 r, q, b, r */
2581 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
2582 vectorize(ctx
, instr
, &r_dst
, 3, q_src
, 0, b_src
, 0, r_src
, 0);
2584 /* madsh.m16 r, b, q, r */
2585 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
2586 vectorize(ctx
, instr
, &r_dst
, 3, b_src
, 0, q_src
, 0, r_src
, 0);
2589 instr
= instr_create(ctx
, 2, OPC_SUB_U
);
2590 vectorize(ctx
, instr
, &r_dst
, 2, a_src
, 0, r_src
, 0);
2592 /* cmps.u.ge r, r, b */
2593 instr
= instr_create(ctx
, 2, OPC_CMPS_U
);
2594 instr
->cat2
.condition
= IR3_COND_GE
;
2595 vectorize(ctx
, instr
, &r_dst
, 2, r_src
, 0, b_src
, 0);
2597 if (type_uint(src_type
)) {
2598 /* add.u dst, q, r */
2599 instr
= instr_create(ctx
, 2, OPC_ADD_U
);
2600 vectorize(ctx
, instr
, premod_dst
, 2, q_src
, 0, r_src
, 0);
2603 instr
= instr_create(ctx
, 2, OPC_ADD_U
);
2604 vectorize(ctx
, instr
, &q_dst
, 2, q_src
, 0, r_src
, 0);
2606 /* negate result based on the original arguments */
2607 if (is_const(a
) && is_const(b
))
2608 a
= get_unconst(ctx
, a
);
2610 /* xor.b r, numerator, denominator */
2611 instr
= instr_create(ctx
, 2, OPC_XOR_B
);
2612 vectorize(ctx
, instr
, &r_dst
, 2, a
, 0, b
, 0);
2614 /* shr.b r, r, 31 */
2615 instr
= instr_create(ctx
, 2, OPC_SHR_B
);
2616 vectorize(ctx
, instr
, &r_dst
, 2, r_src
, 0, &thirty_one
, 0);
2618 /* absneg.s b, (neg)q */
2619 instr
= instr_create(ctx
, 2, OPC_ABSNEG_S
);
2620 vectorize(ctx
, instr
, &b_dst
, 1, q_src
, IR3_REG_NEGATE
);
2622 /* sel.b dst, b, r, q */
2623 instr
= instr_create(ctx
, 3, OPC_SEL_B32
);
2624 vectorize(ctx
, instr
, premod_dst
, 3, b_src
, 0, r_src
, 0, q_src
, 0);
2627 if (t
->tgsi_opc
== TGSI_OPCODE_MOD
|| t
->tgsi_opc
== TGSI_OPCODE_UMOD
) {
2628 /* The division result will have ended up in q. */
2630 if (is_rel_or_const(b
))
2631 b
= get_unconst(ctx
, b
);
2633 /* mull.u r, q, b */
2634 instr
= instr_create(ctx
, 2, OPC_MULL_U
);
2635 vectorize(ctx
, instr
, &r_dst
, 2, q_src
, 0, b
, 0);
2637 /* madsh.m16 r, q, b, r */
2638 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
2639 vectorize(ctx
, instr
, &r_dst
, 3, q_src
, 0, b
, 0, r_src
, 0);
2641 /* madsh.m16 r, b, q, r */
2642 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
2643 vectorize(ctx
, instr
, &r_dst
, 3, b
, 0, q_src
, 0, r_src
, 0);
2645 /* sub.u dst, a, r */
2646 instr
= instr_create(ctx
, 2, OPC_SUB_U
);
2647 vectorize(ctx
, instr
, dst
, 2, a
, 0, r_src
, 0);
2650 put_dst(ctx
, inst
, dst
);
2654 * Handlers for TGSI instructions which do have 1:1 mapping to native
2659 instr_cat0(const struct instr_translater
*t
,
2660 struct ir3_compile_context
*ctx
,
2661 struct tgsi_full_instruction
*inst
)
2663 instr_create(ctx
, 0, t
->opc
);
2667 instr_cat1(const struct instr_translater
*t
,
2668 struct ir3_compile_context
*ctx
,
2669 struct tgsi_full_instruction
*inst
)
2671 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
2672 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
2674 /* NOTE: atomic start/end, rather than in create_mov() since
2675 * create_mov() is used already w/in atomic sequences (and
2676 * we aren't clever enough to deal with the nesting)
2678 instr_atomic_start(ctx
);
2679 create_mov(ctx
, dst
, src
);
2680 instr_atomic_end(ctx
);
2684 instr_cat2(const struct instr_translater
*t
,
2685 struct ir3_compile_context
*ctx
,
2686 struct tgsi_full_instruction
*inst
)
2688 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
2689 struct tgsi_src_register
*src0
= &inst
->Src
[0].Register
;
2690 struct tgsi_src_register
*src1
= &inst
->Src
[1].Register
;
2691 struct ir3_instruction
*instr
;
2692 unsigned src0_flags
= 0, src1_flags
= 0;
2694 switch (t
->tgsi_opc
) {
2695 case TGSI_OPCODE_ABS
:
2696 case TGSI_OPCODE_IABS
:
2697 src0_flags
= IR3_REG_ABS
;
2699 case TGSI_OPCODE_INEG
:
2700 src0_flags
= IR3_REG_NEGATE
;
2702 case TGSI_OPCODE_SUB
:
2703 src1_flags
= IR3_REG_NEGATE
;
2722 /* these only have one src reg */
2723 instr
= instr_create(ctx
, 2, t
->opc
);
2724 vectorize(ctx
, instr
, dst
, 1, src0
, src0_flags
);
2727 if (is_const(src0
) && is_const(src1
))
2728 src0
= get_unconst(ctx
, src0
);
2730 instr
= instr_create(ctx
, 2, t
->opc
);
2731 vectorize(ctx
, instr
, dst
, 2, src0
, src0_flags
,
2736 put_dst(ctx
, inst
, dst
);
2740 instr_cat3(const struct instr_translater
*t
,
2741 struct ir3_compile_context
*ctx
,
2742 struct tgsi_full_instruction
*inst
)
2744 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
2745 struct tgsi_src_register
*src0
= &inst
->Src
[0].Register
;
2746 struct tgsi_src_register
*src1
= &inst
->Src
[1].Register
;
2747 struct ir3_instruction
*instr
;
2749 /* in particular, can't handle const for src1 for cat3..
2750 * for mad, we can swap first two src's if needed:
2752 if (is_rel_or_const(src1
)) {
2753 if (is_mad(t
->opc
) && !is_rel_or_const(src0
)) {
2754 struct tgsi_src_register
*tmp
;
2759 src1
= get_unconst(ctx
, src1
);
2763 instr
= instr_create(ctx
, 3, t
->opc
);
2764 vectorize(ctx
, instr
, dst
, 3, src0
, 0, src1
, 0,
2765 &inst
->Src
[2].Register
, 0);
2766 put_dst(ctx
, inst
, dst
);
2770 instr_cat4(const struct instr_translater
*t
,
2771 struct ir3_compile_context
*ctx
,
2772 struct tgsi_full_instruction
*inst
)
2774 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
2775 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
2776 struct ir3_instruction
*instr
;
2779 /* seems like blob compiler avoids const as src.. */
2781 src
= get_unconst(ctx
, src
);
2783 /* we need to replicate into each component: */
2784 for (i
= 0; i
< 4; i
++) {
2785 if (dst
->WriteMask
& (1 << i
)) {
2786 instr
= instr_create(ctx
, 4, t
->opc
);
2787 add_dst_reg(ctx
, instr
, dst
, i
);
2788 add_src_reg(ctx
, instr
, src
, src
->SwizzleX
);
2792 put_dst(ctx
, inst
, dst
);
2795 static const struct instr_translater translaters
[TGSI_OPCODE_LAST
] = {
2796 #define INSTR(n, f, ...) \
2797 [TGSI_OPCODE_ ## n] = { .fxn = (f), .tgsi_opc = TGSI_OPCODE_ ## n, ##__VA_ARGS__ }
2799 INSTR(MOV
, instr_cat1
),
2800 INSTR(RCP
, instr_cat4
, .opc
= OPC_RCP
),
2801 INSTR(RSQ
, instr_cat4
, .opc
= OPC_RSQ
),
2802 INSTR(SQRT
, instr_cat4
, .opc
= OPC_SQRT
),
2803 INSTR(MUL
, instr_cat2
, .opc
= OPC_MUL_F
),
2804 INSTR(ADD
, instr_cat2
, .opc
= OPC_ADD_F
),
2805 INSTR(SUB
, instr_cat2
, .opc
= OPC_ADD_F
),
2806 INSTR(MIN
, instr_cat2
, .opc
= OPC_MIN_F
),
2807 INSTR(MAX
, instr_cat2
, .opc
= OPC_MAX_F
),
2808 INSTR(UADD
, instr_cat2
, .opc
= OPC_ADD_U
),
2809 INSTR(IMIN
, instr_cat2
, .opc
= OPC_MIN_S
),
2810 INSTR(UMIN
, instr_cat2
, .opc
= OPC_MIN_U
),
2811 INSTR(IMAX
, instr_cat2
, .opc
= OPC_MAX_S
),
2812 INSTR(UMAX
, instr_cat2
, .opc
= OPC_MAX_U
),
2813 INSTR(AND
, instr_cat2
, .opc
= OPC_AND_B
),
2814 INSTR(OR
, instr_cat2
, .opc
= OPC_OR_B
),
2815 INSTR(NOT
, instr_cat2
, .opc
= OPC_NOT_B
),
2816 INSTR(XOR
, instr_cat2
, .opc
= OPC_XOR_B
),
2817 INSTR(UMUL
, trans_umul
),
2818 INSTR(UMAD
, trans_umul
),
2819 INSTR(UDIV
, trans_idiv
),
2820 INSTR(IDIV
, trans_idiv
),
2821 INSTR(MOD
, trans_idiv
),
2822 INSTR(UMOD
, trans_idiv
),
2823 INSTR(SHL
, instr_cat2
, .opc
= OPC_SHL_B
),
2824 INSTR(USHR
, instr_cat2
, .opc
= OPC_SHR_B
),
2825 INSTR(ISHR
, instr_cat2
, .opc
= OPC_ASHR_B
),
2826 INSTR(IABS
, instr_cat2
, .opc
= OPC_ABSNEG_S
),
2827 INSTR(INEG
, instr_cat2
, .opc
= OPC_ABSNEG_S
),
2828 INSTR(AND
, instr_cat2
, .opc
= OPC_AND_B
),
2829 INSTR(MAD
, instr_cat3
, .opc
= OPC_MAD_F32
, .hopc
= OPC_MAD_F16
),
2830 INSTR(TRUNC
, instr_cat2
, .opc
= OPC_TRUNC_F
),
2831 INSTR(CLAMP
, trans_clamp
),
2832 INSTR(FLR
, instr_cat2
, .opc
= OPC_FLOOR_F
),
2833 INSTR(ROUND
, instr_cat2
, .opc
= OPC_RNDNE_F
),
2834 INSTR(SSG
, instr_cat2
, .opc
= OPC_SIGN_F
),
2835 INSTR(CEIL
, instr_cat2
, .opc
= OPC_CEIL_F
),
2836 INSTR(ARL
, trans_arl
),
2837 INSTR(UARL
, trans_arl
),
2838 INSTR(EX2
, instr_cat4
, .opc
= OPC_EXP2
),
2839 INSTR(LG2
, instr_cat4
, .opc
= OPC_LOG2
),
2840 INSTR(ABS
, instr_cat2
, .opc
= OPC_ABSNEG_F
),
2841 INSTR(COS
, instr_cat4
, .opc
= OPC_COS
),
2842 INSTR(SIN
, instr_cat4
, .opc
= OPC_SIN
),
2843 INSTR(TEX
, trans_samp
, .opc
= OPC_SAM
),
2844 INSTR(TXP
, trans_samp
, .opc
= OPC_SAM
),
2845 INSTR(TXB
, trans_samp
, .opc
= OPC_SAMB
),
2846 INSTR(TXB2
, trans_samp
, .opc
= OPC_SAMB
),
2847 INSTR(TXL
, trans_samp
, .opc
= OPC_SAML
),
2848 INSTR(TXD
, trans_samp
, .opc
= OPC_SAMGQ
),
2849 INSTR(TXF
, trans_samp
, .opc
= OPC_ISAML
),
2850 INSTR(TXQ
, trans_txq
),
2851 INSTR(DDX
, trans_deriv
, .opc
= OPC_DSX
),
2852 INSTR(DDY
, trans_deriv
, .opc
= OPC_DSY
),
2853 INSTR(SGT
, trans_cmp
),
2854 INSTR(SLT
, trans_cmp
),
2855 INSTR(FSLT
, trans_cmp
),
2856 INSTR(SGE
, trans_cmp
),
2857 INSTR(FSGE
, trans_cmp
),
2858 INSTR(SLE
, trans_cmp
),
2859 INSTR(SNE
, trans_cmp
),
2860 INSTR(FSNE
, trans_cmp
),
2861 INSTR(SEQ
, trans_cmp
),
2862 INSTR(FSEQ
, trans_cmp
),
2863 INSTR(CMP
, trans_cmp
),
2864 INSTR(USNE
, trans_icmp
, .opc
= OPC_CMPS_U
),
2865 INSTR(USEQ
, trans_icmp
, .opc
= OPC_CMPS_U
),
2866 INSTR(ISGE
, trans_icmp
, .opc
= OPC_CMPS_S
),
2867 INSTR(USGE
, trans_icmp
, .opc
= OPC_CMPS_U
),
2868 INSTR(ISLT
, trans_icmp
, .opc
= OPC_CMPS_S
),
2869 INSTR(USLT
, trans_icmp
, .opc
= OPC_CMPS_U
),
2870 INSTR(UCMP
, trans_ucmp
),
2871 INSTR(ISSG
, trans_issg
),
2872 INSTR(IF
, trans_if
, .opc
= OPC_CMPS_F
),
2873 INSTR(UIF
, trans_if
, .opc
= OPC_CMPS_U
),
2874 INSTR(ELSE
, trans_else
),
2875 INSTR(ENDIF
, trans_endif
),
2876 INSTR(END
, instr_cat0
, .opc
= OPC_END
),
2877 INSTR(KILL
, trans_kill
, .opc
= OPC_KILL
),
2878 INSTR(KILL_IF
, trans_killif
, .opc
= OPC_KILL
),
2879 INSTR(I2F
, trans_cov
),
2880 INSTR(U2F
, trans_cov
),
2881 INSTR(F2I
, trans_cov
),
2882 INSTR(F2U
, trans_cov
),
2886 decl_semantic(const struct tgsi_declaration_semantic
*sem
)
2888 return ir3_semantic_name(sem
->Name
, sem
->Index
);
2891 static struct ir3_instruction
*
2892 decl_in_frag_bary(struct ir3_compile_context
*ctx
, unsigned regid
,
2893 unsigned j
, unsigned inloc
, bool use_ldlv
)
2895 struct ir3_instruction
*instr
;
2896 struct ir3_register
*src
;
2899 /* ldlv.u32 dst, l[#inloc], 1 */
2900 instr
= instr_create(ctx
, 6, OPC_LDLV
);
2901 instr
->cat6
.type
= TYPE_U32
;
2902 instr
->cat6
.iim_val
= 1;
2903 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2904 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= inloc
;
2905 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 1;
2910 /* bary.f dst, #inloc, r0.x */
2911 instr
= instr_create(ctx
, 2, OPC_BARY_F
);
2912 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2913 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= inloc
;
2914 src
= ir3_reg_create(instr
, 0, IR3_REG_SSA
);
2916 src
->instr
= ctx
->frag_pos
;
2921 /* TGSI_SEMANTIC_POSITION
2922 * """"""""""""""""""""""
2924 * For fragment shaders, TGSI_SEMANTIC_POSITION is used to indicate that
2925 * fragment shader input contains the fragment's window position. The X
2926 * component starts at zero and always increases from left to right.
2927 * The Y component starts at zero and always increases but Y=0 may either
2928 * indicate the top of the window or the bottom depending on the fragment
2929 * coordinate origin convention (see TGSI_PROPERTY_FS_COORD_ORIGIN).
2930 * The Z coordinate ranges from 0 to 1 to represent depth from the front
2931 * to the back of the Z buffer. The W component contains the reciprocol
2932 * of the interpolated vertex position W component.
2934 static struct ir3_instruction
*
2935 decl_in_frag_coord(struct ir3_compile_context
*ctx
, unsigned regid
,
2938 struct ir3_instruction
*instr
, *src
;
2940 compile_assert(ctx
, !ctx
->frag_coord
[j
]);
2942 ctx
->frag_coord
[j
] = create_input(ctx
->block
, NULL
, 0);
2948 /* for frag_coord, we get unsigned values.. we need
2949 * to subtract (integer) 8 and divide by 16 (right-
2950 * shift by 4) then convert to float:
2953 /* add.s tmp, src, -8 */
2954 instr
= instr_create(ctx
, 2, OPC_ADD_S
);
2955 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2956 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= ctx
->frag_coord
[j
];
2957 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= -8;
2960 /* shr.b tmp, tmp, 4 */
2961 instr
= instr_create(ctx
, 2, OPC_SHR_B
);
2962 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2963 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= src
;
2964 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 4;
2967 /* mov.u32f32 dst, tmp */
2968 instr
= instr_create(ctx
, 1, 0);
2969 instr
->cat1
.src_type
= TYPE_U32
;
2970 instr
->cat1
.dst_type
= TYPE_F32
;
2971 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2972 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= src
;
2977 /* seems that we can use these as-is: */
2978 instr
= ctx
->frag_coord
[j
];
2981 compile_error(ctx
, "invalid channel\n");
2982 instr
= create_immed(ctx
, 0.0);
2989 /* TGSI_SEMANTIC_FACE
2990 * """"""""""""""""""
2992 * This label applies to fragment shader inputs only and indicates that
2993 * the register contains front/back-face information of the form (F, 0,
2994 * 0, 1). The first component will be positive when the fragment belongs
2995 * to a front-facing polygon, and negative when the fragment belongs to a
2996 * back-facing polygon.
2998 static struct ir3_instruction
*
2999 decl_in_frag_face(struct ir3_compile_context
*ctx
, unsigned regid
,
3002 struct ir3_instruction
*instr
, *src
;
3006 compile_assert(ctx
, !ctx
->frag_face
);
3008 ctx
->frag_face
= create_input(ctx
->block
, NULL
, 0);
3010 /* for faceness, we always get -1 or 0 (int).. but TGSI expects
3011 * positive vs negative float.. and piglit further seems to
3012 * expect -1.0 or 1.0:
3014 * mul.s tmp, hr0.x, 2
3016 * mov.s16f32, dst, tmp
3020 instr
= instr_create(ctx
, 2, OPC_MUL_S
);
3021 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
3022 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= ctx
->frag_face
;
3023 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 2;
3026 instr
= instr_create(ctx
, 2, OPC_ADD_S
);
3027 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
3028 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= src
;
3029 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 1;
3032 instr
= instr_create(ctx
, 1, 0); /* mov */
3033 instr
->cat1
.src_type
= TYPE_S32
;
3034 instr
->cat1
.dst_type
= TYPE_F32
;
3035 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
3036 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= src
;
3041 instr
= create_immed(ctx
, 0.0);
3044 instr
= create_immed(ctx
, 1.0);
3047 compile_error(ctx
, "invalid channel\n");
3048 instr
= create_immed(ctx
, 0.0);
3056 decl_in(struct ir3_compile_context
*ctx
, struct tgsi_full_declaration
*decl
)
3058 struct ir3_shader_variant
*so
= ctx
->so
;
3059 unsigned name
= decl
->Semantic
.Name
;
3062 /* I don't think we should get frag shader input without
3063 * semantic info? Otherwise how do inputs get linked to
3066 compile_assert(ctx
, (ctx
->type
== TGSI_PROCESSOR_VERTEX
) ||
3067 decl
->Declaration
.Semantic
);
3069 for (i
= decl
->Range
.First
; i
<= decl
->Range
.Last
; i
++) {
3070 unsigned n
= so
->inputs_count
++;
3071 unsigned r
= regid(i
, 0);
3074 /* we'll figure out the actual components used after scheduling */
3077 DBG("decl in -> r%d", i
);
3079 compile_assert(ctx
, n
< ARRAY_SIZE(so
->inputs
));
3081 so
->inputs
[n
].semantic
= decl_semantic(&decl
->Semantic
);
3082 so
->inputs
[n
].compmask
= (1 << ncomp
) - 1;
3083 so
->inputs
[n
].regid
= r
;
3084 so
->inputs
[n
].inloc
= ctx
->next_inloc
;
3085 so
->inputs
[n
].interpolate
= decl
->Interp
.Interpolate
;
3087 for (j
= 0; j
< ncomp
; j
++) {
3088 struct ir3_instruction
*instr
= NULL
;
3090 if (ctx
->type
== TGSI_PROCESSOR_FRAGMENT
) {
3091 /* for fragment shaders, POSITION and FACE are handled
3092 * specially, not using normal varying / bary.f
3094 if (name
== TGSI_SEMANTIC_POSITION
) {
3095 so
->inputs
[n
].bary
= false;
3096 so
->frag_coord
= true;
3097 instr
= decl_in_frag_coord(ctx
, r
+ j
, j
);
3098 } else if (name
== TGSI_SEMANTIC_FACE
) {
3099 so
->inputs
[n
].bary
= false;
3100 so
->frag_face
= true;
3101 instr
= decl_in_frag_face(ctx
, r
+ j
, j
);
3103 bool use_ldlv
= false;
3105 /* I don't believe it is valid to not have Interp
3106 * on a normal frag shader input, and various parts
3107 * that that handle flat/smooth shading make this
3108 * assumption as well.
3110 compile_assert(ctx
, decl
->Declaration
.Interpolate
);
3112 if (ctx
->flat_bypass
) {
3113 switch (decl
->Interp
.Interpolate
) {
3114 case TGSI_INTERPOLATE_COLOR
:
3115 if (!ctx
->so
->key
.rasterflat
)
3118 case TGSI_INTERPOLATE_CONSTANT
:
3124 so
->inputs
[n
].bary
= true;
3126 instr
= decl_in_frag_bary(ctx
, r
+ j
, j
,
3127 so
->inputs
[n
].inloc
+ j
- 8, use_ldlv
);
3130 instr
= create_input(ctx
->block
, NULL
, (i
* 4) + j
);
3133 ctx
->block
->inputs
[(i
* 4) + j
] = instr
;
3136 if (so
->inputs
[n
].bary
|| (ctx
->type
== TGSI_PROCESSOR_VERTEX
)) {
3137 ctx
->next_inloc
+= ncomp
;
3138 so
->total_in
+= ncomp
;
3144 decl_sv(struct ir3_compile_context
*ctx
, struct tgsi_full_declaration
*decl
)
3146 struct ir3_shader_variant
*so
= ctx
->so
;
3147 unsigned r
= regid(so
->inputs_count
, 0);
3148 unsigned n
= so
->inputs_count
++;
3150 DBG("decl sv -> r%d", n
);
3152 compile_assert(ctx
, n
< ARRAY_SIZE(so
->inputs
));
3153 compile_assert(ctx
, decl
->Range
.First
< ARRAY_SIZE(ctx
->sysval_semantics
));
3155 ctx
->sysval_semantics
[decl
->Range
.First
] = decl
->Semantic
.Name
;
3156 so
->inputs
[n
].semantic
= decl_semantic(&decl
->Semantic
);
3157 so
->inputs
[n
].compmask
= 1;
3158 so
->inputs
[n
].regid
= r
;
3159 so
->inputs
[n
].inloc
= ctx
->next_inloc
;
3160 so
->inputs
[n
].interpolate
= false;
3162 struct ir3_instruction
*instr
= NULL
;
3164 switch (decl
->Semantic
.Name
) {
3165 case TGSI_SEMANTIC_VERTEXID_NOBASE
:
3166 ctx
->vertex_id
= instr
= create_input(ctx
->block
, NULL
, r
);
3168 case TGSI_SEMANTIC_BASEVERTEX
:
3169 ctx
->basevertex
= instr
= instr_create(ctx
, 1, 0);
3170 instr
->cat1
.src_type
= get_stype(ctx
);
3171 instr
->cat1
.dst_type
= get_stype(ctx
);
3172 ir3_reg_create(instr
, 0, 0);
3173 ir3_reg_create(instr
, regid(so
->first_driver_param
, 0), IR3_REG_CONST
);
3175 case TGSI_SEMANTIC_INSTANCEID
:
3176 ctx
->instance_id
= instr
= create_input(ctx
->block
, NULL
, r
);
3179 compile_error(ctx
, "Unknown semantic: %s\n",
3180 tgsi_semantic_names
[decl
->Semantic
.Name
]);
3183 ctx
->block
->inputs
[r
] = instr
;
3189 decl_out(struct ir3_compile_context
*ctx
, struct tgsi_full_declaration
*decl
)
3191 struct ir3_shader_variant
*so
= ctx
->so
;
3193 unsigned name
= decl
->Semantic
.Name
;
3196 compile_assert(ctx
, decl
->Declaration
.Semantic
);
3198 DBG("decl out[%d] -> r%d", name
, decl
->Range
.First
);
3200 if (ctx
->type
== TGSI_PROCESSOR_VERTEX
) {
3202 case TGSI_SEMANTIC_POSITION
:
3203 so
->writes_pos
= true;
3205 case TGSI_SEMANTIC_PSIZE
:
3206 so
->writes_psize
= true;
3208 case TGSI_SEMANTIC_COLOR
:
3209 case TGSI_SEMANTIC_BCOLOR
:
3210 case TGSI_SEMANTIC_GENERIC
:
3211 case TGSI_SEMANTIC_FOG
:
3212 case TGSI_SEMANTIC_TEXCOORD
:
3215 compile_error(ctx
, "unknown VS semantic name: %s\n",
3216 tgsi_semantic_names
[name
]);
3220 case TGSI_SEMANTIC_POSITION
:
3221 comp
= 2; /* tgsi will write to .z component */
3222 so
->writes_pos
= true;
3224 case TGSI_SEMANTIC_COLOR
:
3227 compile_error(ctx
, "unknown FS semantic name: %s\n",
3228 tgsi_semantic_names
[name
]);
3232 for (i
= decl
->Range
.First
; i
<= decl
->Range
.Last
; i
++) {
3233 unsigned n
= so
->outputs_count
++;
3238 compile_assert(ctx
, n
< ARRAY_SIZE(so
->outputs
));
3240 so
->outputs
[n
].semantic
= decl_semantic(&decl
->Semantic
);
3241 so
->outputs
[n
].regid
= regid(i
, comp
);
3243 /* avoid undefined outputs, stick a dummy mov from imm{0.0},
3244 * which if the output is actually assigned will be over-
3247 for (j
= 0; j
< ncomp
; j
++)
3248 ctx
->block
->outputs
[(i
* 4) + j
] = create_immed(ctx
, 0.0);
3252 /* from TGSI perspective, we actually have inputs. But most of the "inputs"
3253 * for a fragment shader are just bary.f instructions. The *actual* inputs
3254 * from the hw perspective are the frag_pos and optionally frag_coord and
3258 fixup_frag_inputs(struct ir3_compile_context
*ctx
)
3260 struct ir3_shader_variant
*so
= ctx
->so
;
3261 struct ir3_block
*block
= ctx
->block
;
3262 struct ir3_instruction
**inputs
;
3263 struct ir3_instruction
*instr
;
3268 n
= 4; /* always have frag_pos */
3269 n
+= COND(so
->frag_face
, 4);
3270 n
+= COND(so
->frag_coord
, 4);
3272 inputs
= ir3_alloc(ctx
->ir
, n
* (sizeof(struct ir3_instruction
*)));
3274 if (so
->frag_face
) {
3275 /* this ultimately gets assigned to hr0.x so doesn't conflict
3276 * with frag_coord/frag_pos..
3278 inputs
[block
->ninputs
++] = ctx
->frag_face
;
3279 ctx
->frag_face
->regs
[0]->num
= 0;
3281 /* remaining channels not used, but let's avoid confusing
3282 * other parts that expect inputs to come in groups of vec4
3284 inputs
[block
->ninputs
++] = NULL
;
3285 inputs
[block
->ninputs
++] = NULL
;
3286 inputs
[block
->ninputs
++] = NULL
;
3289 /* since we don't know where to set the regid for frag_coord,
3290 * we have to use r0.x for it. But we don't want to *always*
3291 * use r1.x for frag_pos as that could increase the register
3292 * footprint on simple shaders:
3294 if (so
->frag_coord
) {
3295 ctx
->frag_coord
[0]->regs
[0]->num
= regid
++;
3296 ctx
->frag_coord
[1]->regs
[0]->num
= regid
++;
3297 ctx
->frag_coord
[2]->regs
[0]->num
= regid
++;
3298 ctx
->frag_coord
[3]->regs
[0]->num
= regid
++;
3300 inputs
[block
->ninputs
++] = ctx
->frag_coord
[0];
3301 inputs
[block
->ninputs
++] = ctx
->frag_coord
[1];
3302 inputs
[block
->ninputs
++] = ctx
->frag_coord
[2];
3303 inputs
[block
->ninputs
++] = ctx
->frag_coord
[3];
3306 /* we always have frag_pos: */
3307 so
->pos_regid
= regid
;
3310 instr
= create_input(block
, NULL
, block
->ninputs
);
3311 instr
->regs
[0]->num
= regid
++;
3312 inputs
[block
->ninputs
++] = instr
;
3313 ctx
->frag_pos
->regs
[1]->instr
= instr
;
3316 instr
= create_input(block
, NULL
, block
->ninputs
);
3317 instr
->regs
[0]->num
= regid
++;
3318 inputs
[block
->ninputs
++] = instr
;
3319 ctx
->frag_pos
->regs
[2]->instr
= instr
;
3321 block
->inputs
= inputs
;
/* Translate the shader's TGSI token stream into ir3 instructions.
 * Walks every token of ctx->parser, dispatching on token type:
 * declarations set up outputs/inputs/system-values and indexable
 * arrays, immediates are copied into the shader variant, ALU/tex
 * instructions are lowered via the translaters[] table, and
 * properties update per-shader state.
 *
 * NOTE(review): this chunk was recovered from a mangled extraction;
 * the return type, braces/break statements and a few single-line
 * statements were not visible and are reconstructed from context --
 * confirm against upstream before relying on them.
 */
static void
compile_instructions(struct ir3_compile_context *ctx)
{
	push_block(ctx);    /* NOTE(review): reconstructed call -- confirm */

	/* for fragment shader, we have a single input register (usually
	 * r0.xy) which is used as the base for bary.f varying fetch instrs:
	 */
	if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		struct ir3_instruction *instr;
		instr = ir3_instr_create(ctx->block, -1, OPC_META_FI);
		ir3_reg_create(instr, 0, 0);
		ir3_reg_create(instr, 0, IR3_REG_SSA);    /* r0.x */
		ir3_reg_create(instr, 0, IR3_REG_SSA);    /* r0.y */
		ctx->frag_pos = instr;
	}

	while (!tgsi_parse_end_of_tokens(&ctx->parser)) {
		tgsi_parse_token(&ctx->parser);

		switch (ctx->parser.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_DECLARATION: {
			struct tgsi_full_declaration *decl =
					&ctx->parser.FullToken.FullDeclaration;
			unsigned file = decl->Declaration.File;
			if (file == TGSI_FILE_OUTPUT) {
				decl_out(ctx, decl);
			} else if (file == TGSI_FILE_INPUT) {
				decl_in(ctx, decl);    /* NOTE(review): reconstructed call */
			} else if (decl->Declaration.File == TGSI_FILE_SYSTEM_VALUE) {
				decl_sv(ctx, decl);    /* NOTE(review): reconstructed call */
			}

			/* record the range of any declared array (indexable temps,
			 * etc), keyed by ArrayID biased per-file via array_offsets[]:
			 */
			if ((file != TGSI_FILE_CONSTANT) && decl->Declaration.Array) {
				int aid = decl->Array.ArrayID + ctx->array_offsets[file];

				compile_assert(ctx, aid < ARRAY_SIZE(ctx->array));

				/* legacy ArrayID==0 stuff probably isn't going to work
				 * well (and is at least untested).. let's just scream:
				 */
				compile_assert(ctx, aid != 0);

				ctx->array[aid].first = decl->Range.First;
				ctx->array[aid].last = decl->Range.Last;
			}
			break;
		}
		case TGSI_TOKEN_TYPE_IMMEDIATE: {
			/* TODO: if we know the immediate is small enough, and only
			 * used with instructions that can embed an immediate, we
			 * can skip this:
			 */
			struct tgsi_full_immediate *imm =
					&ctx->parser.FullToken.FullImmediate;
			unsigned n = ctx->so->immediates_count++;
			compile_assert(ctx, n < ARRAY_SIZE(ctx->so->immediates));
			/* 16 bytes == one vec4 worth of immediate data: */
			memcpy(ctx->so->immediates[n].val, imm->u, 16);
			break;
		}
		case TGSI_TOKEN_TYPE_INSTRUCTION: {
			struct tgsi_full_instruction *inst =
					&ctx->parser.FullToken.FullInstruction;
			unsigned opc = inst->Instruction.Opcode;
			const struct instr_translater *t = &translaters[opc];

			if (t->fxn) {
				t->fxn(t, ctx, inst);
				/* per-instruction scratch temps are recycled: */
				ctx->num_internal_temps = 0;
				compile_assert(ctx, !ctx->using_tmp_dst);
			} else {
				compile_error(ctx, "unknown TGSI opc: %s\n",
						tgsi_get_opcode_name(opc));
			}

			/* apply TGSI saturate modifier by clamping the dst: */
			switch (inst->Instruction.Saturate) {
			case TGSI_SAT_ZERO_ONE:
				create_clamp_imm(ctx, &inst->Dst[0].Register,
						fui(0.0), fui(1.0));
				break;
			case TGSI_SAT_MINUS_PLUS_ONE:
				create_clamp_imm(ctx, &inst->Dst[0].Register,
						fui(-1.0), fui(1.0));
				break;
			}

			instr_finish(ctx);    /* NOTE(review): reconstructed call */

			break;
		}
		case TGSI_TOKEN_TYPE_PROPERTY: {
			struct tgsi_full_property *prop =
					&ctx->parser.FullToken.FullProperty;
			switch (prop->Property.PropertyName) {
			case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
				ctx->so->color0_mrt = !!prop->u[0].Data;
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}
/* Debug helper: dump the current ir3 graph to a .dot file named
 * "vert-NNNN.dot" / "frag-NNNN.dot" (NNNN monotonically increasing
 * across calls) for offline visualization.
 *
 * NOTE(review): recovered from a mangled extraction; the return type,
 * the local declarations of fname/f and the fopen-failure/fclose
 * boilerplate were not visible and are reconstructed -- confirm
 * against upstream.
 */
static void
compile_dump(struct ir3_compile_context *ctx)
{
	const char *name = (ctx->so->type == SHADER_VERTEX) ? "vert" : "frag";
	static unsigned n = 0;    /* per-process dump counter */
	char fname[16];
	FILE *f;
	snprintf(fname, sizeof(fname), "%s-%04u.dot", name, n++);
	f = fopen(fname, "w");
	if (!f)
		return;
	/* depth pass orders instructions so the dump is readable: */
	ir3_block_depth(ctx->block);
	ir3_dump(ctx->ir, name, ctx->block, f);
	fclose(f);
}
3447 ir3_compile_shader(struct ir3_shader_variant
*so
,
3448 const struct tgsi_token
*tokens
, struct ir3_shader_key key
,
3451 struct ir3_compile_context ctx
;
3452 struct ir3_block
*block
;
3453 struct ir3_instruction
**inputs
;
3454 unsigned i
, j
, actual_in
;
3455 int ret
= 0, max_bary
;
3459 so
->ir
= ir3_create();
3463 if (compile_init(&ctx
, so
, tokens
) != TGSI_PARSE_OK
) {
3464 DBG("INIT failed!");
3469 /* for now, until the edge cases are worked out: */
3470 if (ctx
.info
.indirect_files_written
& (FM(TEMPORARY
) | FM(INPUT
) | FM(OUTPUT
)))
3473 compile_instructions(&ctx
);
3476 so
->ir
->block
= block
;
3478 /* keep track of the inputs from TGSI perspective.. */
3479 inputs
= block
->inputs
;
3481 /* but fixup actual inputs for frag shader: */
3482 if (ctx
.type
== TGSI_PROCESSOR_FRAGMENT
)
3483 fixup_frag_inputs(&ctx
);
3485 /* at this point, for binning pass, throw away unneeded outputs: */
3486 if (key
.binning_pass
) {
3487 for (i
= 0, j
= 0; i
< so
->outputs_count
; i
++) {
3488 unsigned name
= sem2name(so
->outputs
[i
].semantic
);
3489 unsigned idx
= sem2idx(so
->outputs
[i
].semantic
);
3491 /* throw away everything but first position/psize */
3492 if ((idx
== 0) && ((name
== TGSI_SEMANTIC_POSITION
) ||
3493 (name
== TGSI_SEMANTIC_PSIZE
))) {
3495 so
->outputs
[j
] = so
->outputs
[i
];
3496 block
->outputs
[(j
*4)+0] = block
->outputs
[(i
*4)+0];
3497 block
->outputs
[(j
*4)+1] = block
->outputs
[(i
*4)+1];
3498 block
->outputs
[(j
*4)+2] = block
->outputs
[(i
*4)+2];
3499 block
->outputs
[(j
*4)+3] = block
->outputs
[(i
*4)+3];
3504 so
->outputs_count
= j
;
3505 block
->noutputs
= j
* 4;
3508 /* if we want half-precision outputs, mark the output registers
3511 if (key
.half_precision
) {
3512 for (i
= 0; i
< block
->noutputs
; i
++) {
3513 if (!block
->outputs
[i
])
3515 block
->outputs
[i
]->regs
[0]->flags
|= IR3_REG_HALF
;
3519 /* at this point, we want the kill's in the outputs array too,
3520 * so that they get scheduled (since they have no dst).. we've
3521 * already ensured that the array is big enough in push_block():
3523 if (ctx
.type
== TGSI_PROCESSOR_FRAGMENT
) {
3524 for (i
= 0; i
< ctx
.kill_count
; i
++)
3525 block
->outputs
[block
->noutputs
++] = ctx
.kill
[i
];
3528 if (fd_mesa_debug
& FD_DBG_OPTDUMP
)
3531 ret
= ir3_block_flatten(block
);
3533 DBG("FLATTEN failed!");
3536 if ((ret
> 0) && (fd_mesa_debug
& FD_DBG_OPTDUMP
))
3539 if (fd_mesa_debug
& FD_DBG_OPTMSGS
) {
3540 printf("BEFORE CP:\n");
3541 ir3_dump_instr_list(block
->head
);
3544 ir3_block_depth(block
);
3546 /* First remove all the extra mov's (which we could skip if the
3547 * front-end was clever enough not to insert them in the first
3548 * place). Then figure out left/right neighbors, re-inserting
3549 * extra mov's when needed to avoid conflicts.
3551 if (cp
&& !(fd_mesa_debug
& FD_DBG_NOCP
))
3552 ir3_block_cp(block
);
3554 if (fd_mesa_debug
& FD_DBG_OPTMSGS
) {
3555 printf("BEFORE GROUPING:\n");
3556 ir3_dump_instr_list(block
->head
);
3559 /* Group left/right neighbors, inserting mov's where needed to
3562 ir3_block_group(block
);
3564 if (fd_mesa_debug
& FD_DBG_OPTDUMP
)
3567 ir3_block_depth(block
);
3569 if (fd_mesa_debug
& FD_DBG_OPTMSGS
) {
3570 printf("AFTER DEPTH:\n");
3571 ir3_dump_instr_list(block
->head
);
3574 ret
= ir3_block_sched(block
);
3576 DBG("SCHED failed!");
3580 if (fd_mesa_debug
& FD_DBG_OPTMSGS
) {
3581 printf("AFTER SCHED:\n");
3582 ir3_dump_instr_list(block
->head
);
3585 ret
= ir3_block_ra(block
, so
->type
, so
->frag_coord
, so
->frag_face
);
3591 if (fd_mesa_debug
& FD_DBG_OPTMSGS
) {
3592 printf("AFTER RA:\n");
3593 ir3_dump_instr_list(block
->head
);
3596 ir3_block_legalize(block
, &so
->has_samp
, &max_bary
);
3598 /* fixup input/outputs: */
3599 for (i
= 0; i
< so
->outputs_count
; i
++) {
3600 so
->outputs
[i
].regid
= block
->outputs
[i
*4]->regs
[0]->num
;
3601 /* preserve hack for depth output.. tgsi writes depth to .z,
3602 * but what we give the hw is the scalar register:
3604 if ((ctx
.type
== TGSI_PROCESSOR_FRAGMENT
) &&
3605 (sem2name(so
->outputs
[i
].semantic
) == TGSI_SEMANTIC_POSITION
))
3606 so
->outputs
[i
].regid
+= 2;
3608 /* Note that some or all channels of an input may be unused: */
3610 for (i
= 0; i
< so
->inputs_count
; i
++) {
3611 unsigned j
, regid
= ~0, compmask
= 0;
3612 so
->inputs
[i
].ncomp
= 0;
3613 for (j
= 0; j
< 4; j
++) {
3614 struct ir3_instruction
*in
= inputs
[(i
*4) + j
];
3616 compmask
|= (1 << j
);
3617 regid
= in
->regs
[0]->num
- j
;
3619 so
->inputs
[i
].ncomp
++;
3622 so
->inputs
[i
].regid
= regid
;
3623 so
->inputs
[i
].compmask
= compmask
;
3626 /* fragment shader always gets full vec4's even if it doesn't
3627 * fetch all components, but vertex shader we need to update
3628 * with the actual number of components fetch, otherwise thing
3629 * will hang due to mismaptch between VFD_DECODE's and
3632 if (so
->type
== SHADER_VERTEX
)
3633 so
->total_in
= actual_in
;
3635 so
->total_in
= align(max_bary
+ 1, 4);
3639 ir3_destroy(so
->ir
);