1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
4 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Rob Clark <robclark@freedesktop.org>
31 #include "pipe/p_state.h"
32 #include "util/u_string.h"
33 #include "util/u_memory.h"
34 #include "util/u_inlines.h"
35 #include "tgsi/tgsi_parse.h"
36 #include "tgsi/tgsi_ureg.h"
37 #include "tgsi/tgsi_info.h"
38 #include "tgsi/tgsi_strings.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_scan.h"
42 #include "freedreno_lowering.h"
43 #include "freedreno_util.h"
45 #include "ir3_compiler.h"
46 #include "ir3_shader.h"
48 #include "instr-a3xx.h"
51 struct ir3_compile_context
{
52 const struct tgsi_token
*tokens
;
55 struct ir3_shader_variant
*so
;
57 struct ir3_block
*block
;
58 struct ir3_instruction
*current_instr
;
60 /* we need to defer updates to block->outputs[] until the end
61 * of an instruction (so we don't see new value until *after*
62 * the src registers are processed)
65 struct ir3_instruction
*instr
, **instrp
;
67 unsigned num_output_updates
;
69 /* are we in a sequence of "atomic" instructions?
73 /* For fragment shaders, from the hw perspective the only
74 * actual input is r0.xy position register passed to bary.f.
75 * But TGSI doesn't know that, it still declares things as
76 * IN[] registers. So we do all the input tracking normally
77 * and fix things up after compile_instructions()
79 * NOTE that frag_pos is the hardware position (possibly it
80 * is actually an index or tag or some such.. it is *not*
81 * values that can be directly used for gl_FragCoord..)
83 struct ir3_instruction
*frag_pos
, *frag_face
, *frag_coord
[4];
85 struct tgsi_parse_context parser
;
88 struct tgsi_shader_info info
;
90 /* for calculating input/output positions/linkages: */
93 unsigned num_internal_temps
;
94 struct tgsi_src_register internal_temps
[6];
96 /* idx/slot for last compiler generated immediate */
97 unsigned immediate_idx
;
99 /* stack of branch instructions that mark (potentially nested)
100 * branch if/else/loop/etc
103 struct ir3_instruction
*instr
, *cond
;
104 bool inv
; /* true iff in else leg of branch */
106 unsigned int branch_count
;
108 /* list of kill instructions: */
109 struct ir3_instruction
*kill
[16];
110 unsigned int kill_count
;
112 /* used when dst is same as one of the src, to avoid overwriting a
113 * src element before the remaining scalar instructions that make
114 * up the vector operation
116 struct tgsi_dst_register tmp_dst
;
117 struct tgsi_src_register
*tmp_src
;
119 /* just for catching incorrect use of get_dst()/put_dst():
125 static void vectorize(struct ir3_compile_context
*ctx
,
126 struct ir3_instruction
*instr
, struct tgsi_dst_register
*dst
,
128 static void create_mov(struct ir3_compile_context
*ctx
,
129 struct tgsi_dst_register
*dst
, struct tgsi_src_register
*src
);
130 static type_t
get_ftype(struct ir3_compile_context
*ctx
);
133 compile_init(struct ir3_compile_context
*ctx
, struct ir3_shader_variant
*so
,
134 const struct tgsi_token
*tokens
)
137 struct tgsi_shader_info
*info
= &ctx
->info
;
138 const struct fd_lowering_config lconfig
= {
139 .color_two_side
= so
->key
.color_two_side
,
156 ctx
->tokens
= fd_transform_lowering(&lconfig
, tokens
, &ctx
->info
);
157 ctx
->free_tokens
= !!ctx
->tokens
;
160 ctx
->tokens
= tokens
;
165 ctx
->num_internal_temps
= 0;
166 ctx
->branch_count
= 0;
169 ctx
->current_instr
= NULL
;
170 ctx
->num_output_updates
= 0;
172 ctx
->frag_pos
= NULL
;
173 ctx
->frag_face
= NULL
;
175 ctx
->using_tmp_dst
= false;
177 memset(ctx
->frag_coord
, 0, sizeof(ctx
->frag_coord
));
179 #define FM(x) (1 << TGSI_FILE_##x)
180 /* optimize can't deal with relative addressing: */
181 if (info
->indirect_files
& (FM(TEMPORARY
) | FM(INPUT
) | FM(OUTPUT
)))
182 return TGSI_PARSE_ERROR
;
184 /* NOTE: if relative addressing is used, we set constlen in
185 * the compiler (to worst-case value) since we don't know in
186 * the assembler what the max addr reg value can be:
188 if (info
->indirect_files
& FM(CONSTANT
))
189 so
->constlen
= 4 * (ctx
->info
.file_max
[TGSI_FILE_CONSTANT
] + 1);
191 /* Immediates go after constants: */
192 so
->first_immediate
= info
->file_max
[TGSI_FILE_CONSTANT
] + 1;
193 ctx
->immediate_idx
= 4 * (ctx
->info
.file_max
[TGSI_FILE_IMMEDIATE
] + 1);
195 ret
= tgsi_parse_init(&ctx
->parser
, ctx
->tokens
);
196 if (ret
!= TGSI_PARSE_OK
)
199 ctx
->type
= ctx
->parser
.FullHeader
.Processor
.Processor
;
205 compile_error(struct ir3_compile_context
*ctx
, const char *format
, ...)
208 va_start(ap
, format
);
209 _debug_vprintf(format
, ap
);
211 tgsi_dump(ctx
->tokens
, 0);
215 #define compile_assert(ctx, cond) do { \
216 if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
220 compile_free(struct ir3_compile_context
*ctx
)
222 if (ctx
->free_tokens
)
223 free((void *)ctx
->tokens
);
224 tgsi_parse_free(&ctx
->parser
);
227 struct instr_translater
{
228 void (*fxn
)(const struct instr_translater
*t
,
229 struct ir3_compile_context
*ctx
,
230 struct tgsi_full_instruction
*inst
);
233 opc_t hopc
; /* opc to use for half_precision mode, if different */
238 instr_finish(struct ir3_compile_context
*ctx
)
245 for (i
= 0; i
< ctx
->num_output_updates
; i
++)
246 *(ctx
->output_updates
[i
].instrp
) = ctx
->output_updates
[i
].instr
;
248 ctx
->num_output_updates
= 0;
251 /* For "atomic" groups of instructions, for example the four scalar
252 * instructions to perform a vec4 operation. Basically this just
253 * blocks out handling of output_updates so the next scalar instruction
254 * still sees the result from before the start of the atomic group.
256 * NOTE: when used properly, this could probably replace get/put_dst()
260 instr_atomic_start(struct ir3_compile_context
*ctx
)
266 instr_atomic_end(struct ir3_compile_context
*ctx
)
272 static struct ir3_instruction
*
273 instr_create(struct ir3_compile_context
*ctx
, int category
, opc_t opc
)
276 return (ctx
->current_instr
= ir3_instr_create(ctx
->block
, category
, opc
));
279 static struct ir3_instruction
*
280 instr_clone(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
)
283 return (ctx
->current_instr
= ir3_instr_clone(instr
));
286 static struct ir3_block
*
287 push_block(struct ir3_compile_context
*ctx
)
289 struct ir3_block
*block
;
290 unsigned ntmp
, nin
, nout
;
292 #define SCALAR_REGS(file) (4 * (ctx->info.file_max[TGSI_FILE_ ## file] + 1))
294 /* hmm, give ourselves room to create 4 extra temporaries (vec4):
296 ntmp
= SCALAR_REGS(TEMPORARY
);
299 nout
= SCALAR_REGS(OUTPUT
);
300 nin
= SCALAR_REGS(INPUT
);
302 /* for outermost block, 'inputs' are the actual shader INPUT
303 * register file. Reads from INPUT registers always go back to
304 * top block. For nested blocks, 'inputs' is used to track any
305 * TEMPORARY file register from one of the enclosing blocks that
306 * is ready in this block.
309 /* NOTE: fragment shaders actually have two inputs (r0.xy, the
312 if (ctx
->type
== TGSI_PROCESSOR_FRAGMENT
) {
314 if (ctx
->info
.reads_position
)
316 if (ctx
->info
.uses_frontface
)
319 nout
+= ARRAY_SIZE(ctx
->kill
);
325 block
= ir3_block_create(ctx
->ir
, ntmp
, nin
, nout
);
327 if ((ctx
->type
== TGSI_PROCESSOR_FRAGMENT
) && !ctx
->block
)
328 block
->noutputs
-= ARRAY_SIZE(ctx
->kill
);
330 block
->parent
= ctx
->block
;
337 pop_block(struct ir3_compile_context
*ctx
)
339 ctx
->block
= ctx
->block
->parent
;
340 compile_assert(ctx
, ctx
->block
);
343 static struct ir3_instruction
*
344 create_output(struct ir3_block
*block
, struct ir3_instruction
*instr
,
347 struct ir3_instruction
*out
;
349 out
= ir3_instr_create(block
, -1, OPC_META_OUTPUT
);
350 out
->inout
.block
= block
;
351 ir3_reg_create(out
, n
, 0);
353 ir3_reg_create(out
, 0, IR3_REG_SSA
)->instr
= instr
;
358 static struct ir3_instruction
*
359 create_input(struct ir3_block
*block
, struct ir3_instruction
*instr
,
362 struct ir3_instruction
*in
;
364 in
= ir3_instr_create(block
, -1, OPC_META_INPUT
);
365 in
->inout
.block
= block
;
366 ir3_reg_create(in
, n
, 0);
368 ir3_reg_create(in
, 0, IR3_REG_SSA
)->instr
= instr
;
373 static struct ir3_instruction
*
374 block_input(struct ir3_block
*block
, unsigned n
)
376 /* references to INPUT register file always go back up to
380 return block_input(block
->parent
, n
);
381 return block
->inputs
[n
];
384 /* return temporary in scope, creating if needed meta-input node
385 * to track block inputs
387 static struct ir3_instruction
*
388 block_temporary(struct ir3_block
*block
, unsigned n
)
390 /* references to TEMPORARY register file, find the nearest
391 * enclosing block which has already assigned this temporary,
392 * creating meta-input instructions along the way to keep
393 * track of block inputs
395 if (block
->parent
&& !block
->temporaries
[n
]) {
396 /* if already have input for this block, reuse: */
397 if (!block
->inputs
[n
])
398 block
->inputs
[n
] = block_temporary(block
->parent
, n
);
400 /* and create new input to return: */
401 return create_input(block
, block
->inputs
[n
], n
);
403 return block
->temporaries
[n
];
406 static struct ir3_instruction
*
407 create_immed(struct ir3_compile_context
*ctx
, float val
)
409 /* NOTE: *don't* use instr_create() here!
411 struct ir3_instruction
*instr
;
412 instr
= ir3_instr_create(ctx
->block
, 1, 0);
413 instr
->cat1
.src_type
= get_ftype(ctx
);
414 instr
->cat1
.dst_type
= get_ftype(ctx
);
415 ir3_reg_create(instr
, 0, 0);
416 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->fim_val
= val
;
421 ssa_dst(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
,
422 const struct tgsi_dst_register
*dst
, unsigned chan
)
424 unsigned n
= regid(dst
->Index
, chan
);
425 unsigned idx
= ctx
->num_output_updates
;
427 compile_assert(ctx
, idx
< ARRAY_SIZE(ctx
->output_updates
));
429 /* NOTE: defer update of temporaries[idx] or output[idx]
430 * until instr_finish(), so that if the current instruction
431 * reads the same TEMP/OUT[] it gets the old value:
433 * bleh.. this might be a bit easier to just figure out
434 * in instr_finish(). But at that point we've already
435 * lost information about OUTPUT vs TEMPORARY register
440 case TGSI_FILE_OUTPUT
:
441 compile_assert(ctx
, n
< ctx
->block
->noutputs
);
442 ctx
->output_updates
[idx
].instrp
= &ctx
->block
->outputs
[n
];
443 ctx
->output_updates
[idx
].instr
= instr
;
444 ctx
->num_output_updates
++;
446 case TGSI_FILE_TEMPORARY
:
447 compile_assert(ctx
, n
< ctx
->block
->ntemporaries
);
448 ctx
->output_updates
[idx
].instrp
= &ctx
->block
->temporaries
[n
];
449 ctx
->output_updates
[idx
].instr
= instr
;
450 ctx
->num_output_updates
++;
452 case TGSI_FILE_ADDRESS
:
453 compile_assert(ctx
, n
< 1);
454 ctx
->output_updates
[idx
].instrp
= &ctx
->block
->address
;
455 ctx
->output_updates
[idx
].instr
= instr
;
456 ctx
->num_output_updates
++;
462 ssa_src(struct ir3_compile_context
*ctx
, struct ir3_register
*reg
,
463 const struct tgsi_src_register
*src
, unsigned chan
)
465 struct ir3_block
*block
= ctx
->block
;
466 unsigned n
= regid(src
->Index
, chan
);
469 case TGSI_FILE_INPUT
:
470 reg
->flags
|= IR3_REG_SSA
;
471 reg
->instr
= block_input(ctx
->block
, n
);
473 case TGSI_FILE_OUTPUT
:
474 /* really this should just happen in case of 'MOV_SAT OUT[n], ..',
475 * for the following clamp instructions:
477 reg
->flags
|= IR3_REG_SSA
;
478 reg
->instr
= block
->outputs
[n
];
479 /* we don't have to worry about read from an OUTPUT that was
480 * assigned outside of the current block, because the _SAT
481 * clamp instructions will always be in the same block as
482 * the original instruction which wrote the OUTPUT
484 compile_assert(ctx
, reg
->instr
);
486 case TGSI_FILE_TEMPORARY
:
487 reg
->flags
|= IR3_REG_SSA
;
488 reg
->instr
= block_temporary(ctx
->block
, n
);
492 if ((reg
->flags
& IR3_REG_SSA
) && !reg
->instr
) {
493 /* this can happen when registers (or components of a TGSI
494 * register) are used as src before they have been assigned
495 * (undefined contents). To avoid confusing the rest of the
496 * compiler, and to generally keep things peachy, substitute
497 * an instruction that sets the src to 0.0. Or to keep
498 * things undefined, I could plug in a random number? :-P
500 * NOTE: *don't* use instr_create() here!
502 reg
->instr
= create_immed(ctx
, 0.0);
506 static struct ir3_register
*
507 add_dst_reg_wrmask(struct ir3_compile_context
*ctx
,
508 struct ir3_instruction
*instr
, const struct tgsi_dst_register
*dst
,
509 unsigned chan
, unsigned wrmask
)
511 unsigned flags
= 0, num
= 0;
512 struct ir3_register
*reg
;
515 case TGSI_FILE_OUTPUT
:
516 case TGSI_FILE_TEMPORARY
:
519 case TGSI_FILE_ADDRESS
:
520 flags
|= IR3_REG_ADDR
;
524 compile_error(ctx
, "unsupported dst register file: %s\n",
525 tgsi_file_name(dst
->File
));
530 flags
|= IR3_REG_RELATIV
;
532 reg
= ir3_reg_create(instr
, regid(num
, chan
), flags
);
534 /* NOTE: do not call ssa_dst() if atomic.. vectorize()
535 * itself will call ssa_dst(). This is to filter out
536 * the (initially bogus) .x component dst which is
537 * created (but not necessarily used, ie. if the net
538 * result of the vector operation does not write to
542 reg
->wrmask
= wrmask
;
546 ssa_dst(ctx
, instr
, dst
, chan
);
547 } else if ((dst
->File
== TGSI_FILE_TEMPORARY
) ||
548 (dst
->File
== TGSI_FILE_OUTPUT
) ||
549 (dst
->File
== TGSI_FILE_ADDRESS
)) {
552 /* if instruction writes multiple, we need to create
553 * some place-holder collect the registers:
555 for (i
= 0; i
< 4; i
++) {
556 if (wrmask
& (1 << i
)) {
557 struct ir3_instruction
*collect
=
558 ir3_instr_create(ctx
->block
, -1, OPC_META_FO
);
560 /* unused dst reg: */
561 ir3_reg_create(collect
, 0, 0);
562 /* and src reg used to hold original instr */
563 ir3_reg_create(collect
, 0, IR3_REG_SSA
)->instr
= instr
;
565 ssa_dst(ctx
, collect
, dst
, chan
+i
);
/* Convenience wrapper for the common single-component dst case
 * (writemask of just .x relative to 'chan').
 */
static struct ir3_register *
add_dst_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_dst_register *dst, unsigned chan)
{
	return add_dst_reg_wrmask(ctx, instr, dst, chan, 0x1);
}
580 static struct ir3_register
*
581 add_src_reg_wrmask(struct ir3_compile_context
*ctx
,
582 struct ir3_instruction
*instr
, const struct tgsi_src_register
*src
,
583 unsigned chan
, unsigned wrmask
)
585 unsigned flags
= 0, num
= 0;
586 struct ir3_register
*reg
;
587 struct ir3_instruction
*orig
= NULL
;
589 /* TODO we need to use a mov to temp for const >= 64.. or maybe
590 * we could use relative addressing..
592 compile_assert(ctx
, src
->Index
< 64);
595 case TGSI_FILE_IMMEDIATE
:
596 /* TODO if possible, use actual immediate instead of const.. but
597 * TGSI has vec4 immediates, we can only embed scalar (of limited
598 * size, depending on instruction..)
600 flags
|= IR3_REG_CONST
;
601 num
= src
->Index
+ ctx
->so
->first_immediate
;
603 case TGSI_FILE_CONSTANT
:
604 flags
|= IR3_REG_CONST
;
607 case TGSI_FILE_OUTPUT
:
608 /* NOTE: we should only end up w/ OUTPUT file for things like
609 * clamp()'ing saturated dst instructions
611 case TGSI_FILE_INPUT
:
612 case TGSI_FILE_TEMPORARY
:
616 compile_error(ctx
, "unsupported src register file: %s\n",
617 tgsi_file_name(src
->File
));
622 flags
|= IR3_REG_ABS
;
624 flags
|= IR3_REG_NEGATE
;
627 flags
|= IR3_REG_RELATIV
;
629 /* shouldn't happen, and we can't cope with it below: */
630 compile_assert(ctx
, wrmask
== 0x1);
632 /* wrap in a meta-deref to track both the src and address: */
635 instr
= ir3_instr_create(ctx
->block
, -1, OPC_META_DEREF
);
636 ir3_reg_create(instr
, 0, 0);
637 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= ctx
->block
->address
;
640 reg
= ir3_reg_create(instr
, regid(num
, chan
), flags
);
642 reg
->wrmask
= wrmask
;
645 ssa_src(ctx
, reg
, src
, chan
);
646 } else if ((src
->File
== TGSI_FILE_TEMPORARY
) ||
647 (src
->File
== TGSI_FILE_OUTPUT
) ||
648 (src
->File
== TGSI_FILE_INPUT
)) {
649 struct ir3_instruction
*collect
;
652 compile_assert(ctx
, !src
->Indirect
);
654 /* if instruction reads multiple, we need to create
655 * some place-holder collect the registers:
657 collect
= ir3_instr_create(ctx
->block
, -1, OPC_META_FI
);
658 ir3_reg_create(collect
, 0, 0); /* unused dst reg */
660 for (i
= 0; i
< 4; i
++) {
661 if (wrmask
& (1 << i
)) {
662 /* and src reg used point to the original instr */
663 ssa_src(ctx
, ir3_reg_create(collect
, 0, IR3_REG_SSA
),
665 } else if (wrmask
& ~((i
<< i
) - 1)) {
666 /* if any remaining components, then dummy
667 * placeholder src reg to fill in the blanks:
669 ir3_reg_create(collect
, 0, 0);
673 reg
->flags
|= IR3_REG_SSA
;
674 reg
->instr
= collect
;
678 reg
= ir3_reg_create(orig
, 0, flags
| IR3_REG_SSA
);
/* Convenience wrapper for the common single-component src case
 * (writemask of just .x relative to 'chan').
 */
static struct ir3_register *
add_src_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_src_register *src, unsigned chan)
{
	return add_src_reg_wrmask(ctx, instr, src, chan, 0x1);
}
692 src_from_dst(struct tgsi_src_register
*src
, struct tgsi_dst_register
*dst
)
694 src
->File
= dst
->File
;
695 src
->Indirect
= dst
->Indirect
;
696 src
->Dimension
= dst
->Dimension
;
697 src
->Index
= dst
->Index
;
700 src
->SwizzleX
= TGSI_SWIZZLE_X
;
701 src
->SwizzleY
= TGSI_SWIZZLE_Y
;
702 src
->SwizzleZ
= TGSI_SWIZZLE_Z
;
703 src
->SwizzleW
= TGSI_SWIZZLE_W
;
706 /* Get internal-temp src/dst to use for a sequence of instructions
707 * generated by a single TGSI op.
709 static struct tgsi_src_register
*
710 get_internal_temp(struct ir3_compile_context
*ctx
,
711 struct tgsi_dst_register
*tmp_dst
)
713 struct tgsi_src_register
*tmp_src
;
716 tmp_dst
->File
= TGSI_FILE_TEMPORARY
;
717 tmp_dst
->WriteMask
= TGSI_WRITEMASK_XYZW
;
718 tmp_dst
->Indirect
= 0;
719 tmp_dst
->Dimension
= 0;
721 /* assign next temporary: */
722 n
= ctx
->num_internal_temps
++;
723 compile_assert(ctx
, n
< ARRAY_SIZE(ctx
->internal_temps
));
724 tmp_src
= &ctx
->internal_temps
[n
];
726 tmp_dst
->Index
= ctx
->info
.file_max
[TGSI_FILE_TEMPORARY
] + n
+ 1;
728 src_from_dst(tmp_src
, tmp_dst
);
734 is_const(struct tgsi_src_register
*src
)
736 return (src
->File
== TGSI_FILE_CONSTANT
) ||
737 (src
->File
== TGSI_FILE_IMMEDIATE
);
741 is_relative(struct tgsi_src_register
*src
)
743 return src
->Indirect
;
/* true when the src is either indirect or in the const file -- the
 * cases some instructions can't consume directly (see get_unconst())
 */
static bool
is_rel_or_const(struct tgsi_src_register *src)
{
	return is_relative(src) || is_const(src);
}
753 get_ftype(struct ir3_compile_context
*ctx
)
759 get_utype(struct ir3_compile_context
*ctx
)
765 src_swiz(struct tgsi_src_register
*src
, int chan
)
768 case 0: return src
->SwizzleX
;
769 case 1: return src
->SwizzleY
;
770 case 2: return src
->SwizzleZ
;
771 case 3: return src
->SwizzleW
;
777 /* for instructions that cannot take a const register as src, if needed
778 * generate a move to temporary gpr:
780 static struct tgsi_src_register
*
781 get_unconst(struct ir3_compile_context
*ctx
, struct tgsi_src_register
*src
)
783 struct tgsi_dst_register tmp_dst
;
784 struct tgsi_src_register
*tmp_src
;
786 compile_assert(ctx
, is_rel_or_const(src
));
788 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
790 create_mov(ctx
, &tmp_dst
, src
);
796 get_immediate(struct ir3_compile_context
*ctx
,
797 struct tgsi_src_register
*reg
, uint32_t val
)
799 unsigned neg
, swiz
, idx
, i
;
800 /* actually maps 1:1 currently.. not sure if that is safe to rely on: */
801 static const unsigned swiz2tgsi
[] = {
802 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_W
,
805 for (i
= 0; i
< ctx
->immediate_idx
; i
++) {
809 if (ctx
->so
->immediates
[idx
].val
[swiz
] == val
) {
814 if (ctx
->so
->immediates
[idx
].val
[swiz
] == -val
) {
820 if (i
== ctx
->immediate_idx
) {
821 /* need to generate a new immediate: */
825 ctx
->so
->immediates
[idx
].val
[swiz
] = val
;
826 ctx
->so
->immediates_count
= idx
+ 1;
827 ctx
->immediate_idx
++;
830 reg
->File
= TGSI_FILE_IMMEDIATE
;
836 reg
->SwizzleX
= swiz2tgsi
[swiz
];
837 reg
->SwizzleY
= swiz2tgsi
[swiz
];
838 reg
->SwizzleZ
= swiz2tgsi
[swiz
];
839 reg
->SwizzleW
= swiz2tgsi
[swiz
];
843 create_mov(struct ir3_compile_context
*ctx
, struct tgsi_dst_register
*dst
,
844 struct tgsi_src_register
*src
)
846 type_t type_mov
= get_ftype(ctx
);
849 for (i
= 0; i
< 4; i
++) {
850 /* move to destination: */
851 if (dst
->WriteMask
& (1 << i
)) {
852 struct ir3_instruction
*instr
;
854 if (src
->Absolute
|| src
->Negate
) {
855 /* can't have abs or neg on a mov instr, so use
856 * absneg.f instead to handle these cases:
858 instr
= instr_create(ctx
, 2, OPC_ABSNEG_F
);
860 instr
= instr_create(ctx
, 1, 0);
861 instr
->cat1
.src_type
= type_mov
;
862 instr
->cat1
.dst_type
= type_mov
;
865 add_dst_reg(ctx
, instr
, dst
, i
);
866 add_src_reg(ctx
, instr
, src
, src_swiz(src
, i
));
872 create_clamp(struct ir3_compile_context
*ctx
,
873 struct tgsi_dst_register
*dst
, struct tgsi_src_register
*val
,
874 struct tgsi_src_register
*minval
, struct tgsi_src_register
*maxval
)
876 struct ir3_instruction
*instr
;
878 instr
= instr_create(ctx
, 2, OPC_MAX_F
);
879 vectorize(ctx
, instr
, dst
, 2, val
, 0, minval
, 0);
881 instr
= instr_create(ctx
, 2, OPC_MIN_F
);
882 vectorize(ctx
, instr
, dst
, 2, val
, 0, maxval
, 0);
886 create_clamp_imm(struct ir3_compile_context
*ctx
,
887 struct tgsi_dst_register
*dst
,
888 uint32_t minval
, uint32_t maxval
)
890 struct tgsi_src_register minconst
, maxconst
;
891 struct tgsi_src_register src
;
893 src_from_dst(&src
, dst
);
895 get_immediate(ctx
, &minconst
, minval
);
896 get_immediate(ctx
, &maxconst
, maxval
);
898 create_clamp(ctx
, dst
, &src
, &minconst
, &maxconst
);
901 static struct tgsi_dst_register
*
902 get_dst(struct ir3_compile_context
*ctx
, struct tgsi_full_instruction
*inst
)
904 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
907 compile_assert(ctx
, !ctx
->using_tmp_dst
);
908 ctx
->using_tmp_dst
= true;
910 for (i
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
911 struct tgsi_src_register
*src
= &inst
->Src
[i
].Register
;
912 if ((src
->File
== dst
->File
) && (src
->Index
== dst
->Index
)) {
913 if ((dst
->WriteMask
== TGSI_WRITEMASK_XYZW
) &&
914 (src
->SwizzleX
== TGSI_SWIZZLE_X
) &&
915 (src
->SwizzleY
== TGSI_SWIZZLE_Y
) &&
916 (src
->SwizzleZ
== TGSI_SWIZZLE_Z
) &&
917 (src
->SwizzleW
== TGSI_SWIZZLE_W
))
919 ctx
->tmp_src
= get_internal_temp(ctx
, &ctx
->tmp_dst
);
920 ctx
->tmp_dst
.WriteMask
= dst
->WriteMask
;
929 put_dst(struct ir3_compile_context
*ctx
, struct tgsi_full_instruction
*inst
,
930 struct tgsi_dst_register
*dst
)
932 compile_assert(ctx
, ctx
->using_tmp_dst
);
933 ctx
->using_tmp_dst
= false;
935 /* if necessary, add mov back into original dst: */
936 if (dst
!= &inst
->Dst
[0].Register
) {
937 create_mov(ctx
, &inst
->Dst
[0].Register
, ctx
->tmp_src
);
941 /* helper to generate the necessary repeat and/or additional instructions
942 * to turn a scalar instruction into a vector operation:
945 vectorize(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
,
946 struct tgsi_dst_register
*dst
, int nsrcs
, ...)
951 instr_atomic_start(ctx
);
953 add_dst_reg(ctx
, instr
, dst
, TGSI_SWIZZLE_X
);
956 for (j
= 0; j
< nsrcs
; j
++) {
957 struct tgsi_src_register
*src
=
958 va_arg(ap
, struct tgsi_src_register
*);
959 unsigned flags
= va_arg(ap
, unsigned);
960 struct ir3_register
*reg
;
961 if (flags
& IR3_REG_IMMED
) {
962 reg
= ir3_reg_create(instr
, 0, IR3_REG_IMMED
);
963 /* this is an ugly cast.. should have put flags first! */
964 reg
->iim_val
= *(int *)&src
;
966 reg
= add_src_reg(ctx
, instr
, src
, TGSI_SWIZZLE_X
);
968 reg
->flags
|= flags
& ~IR3_REG_NEGATE
;
969 if (flags
& IR3_REG_NEGATE
)
970 reg
->flags
^= IR3_REG_NEGATE
;
974 for (i
= 0; i
< 4; i
++) {
975 if (dst
->WriteMask
& (1 << i
)) {
976 struct ir3_instruction
*cur
;
981 cur
= instr_clone(ctx
, instr
);
984 ssa_dst(ctx
, cur
, dst
, i
);
986 /* fix-up dst register component: */
987 cur
->regs
[0]->num
= regid(cur
->regs
[0]->num
>> 2, i
);
989 /* fix-up src register component: */
991 for (j
= 0; j
< nsrcs
; j
++) {
992 struct ir3_register
*reg
= cur
->regs
[j
+1];
993 struct tgsi_src_register
*src
=
994 va_arg(ap
, struct tgsi_src_register
*);
995 unsigned flags
= va_arg(ap
, unsigned);
996 if (reg
->flags
& IR3_REG_SSA
) {
997 ssa_src(ctx
, reg
, src
, src_swiz(src
, i
));
998 } else if (!(flags
& IR3_REG_IMMED
)) {
999 reg
->num
= regid(reg
->num
>> 2, src_swiz(src
, i
));
1006 instr_atomic_end(ctx
);
1010 * Handlers for TGSI instructions which do not have a 1:1 mapping to
1011 * native instructions:
1015 trans_clamp(const struct instr_translater
*t
,
1016 struct ir3_compile_context
*ctx
,
1017 struct tgsi_full_instruction
*inst
)
1019 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1020 struct tgsi_src_register
*src0
= &inst
->Src
[0].Register
;
1021 struct tgsi_src_register
*src1
= &inst
->Src
[1].Register
;
1022 struct tgsi_src_register
*src2
= &inst
->Src
[2].Register
;
1024 create_clamp(ctx
, dst
, src0
, src1
, src2
);
1026 put_dst(ctx
, inst
, dst
);
1029 /* ARL(x) = x, but mova from hrN.x to a0.. */
1031 trans_arl(const struct instr_translater
*t
,
1032 struct ir3_compile_context
*ctx
,
1033 struct tgsi_full_instruction
*inst
)
1035 struct ir3_instruction
*instr
;
1036 struct tgsi_dst_register tmp_dst
;
1037 struct tgsi_src_register
*tmp_src
;
1038 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
1039 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1040 unsigned chan
= src
->SwizzleX
;
1042 compile_assert(ctx
, dst
->File
== TGSI_FILE_ADDRESS
);
1044 /* NOTE: we allocate a temporary from a flat register
1045 * namespace (ignoring half vs full). It turns out
1046 * not to really matter since registers get reassigned
1047 * later in ir3_ra which (hopefully!) can deal a bit
1048 * better with mixed half and full precision.
1050 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1052 /* cov.{u,f}{32,16}s16 Rtmp, Rsrc */
1053 instr
= instr_create(ctx
, 1, 0);
1054 instr
->cat1
.src_type
= (t
->tgsi_opc
== TGSI_OPCODE_ARL
) ?
1055 get_ftype(ctx
) : get_utype(ctx
);
1056 instr
->cat1
.dst_type
= TYPE_S16
;
1057 add_dst_reg(ctx
, instr
, &tmp_dst
, chan
)->flags
|= IR3_REG_HALF
;
1058 add_src_reg(ctx
, instr
, src
, chan
);
1060 /* shl.b Rtmp, Rtmp, 2 */
1061 instr
= instr_create(ctx
, 2, OPC_SHL_B
);
1062 add_dst_reg(ctx
, instr
, &tmp_dst
, chan
)->flags
|= IR3_REG_HALF
;
1063 add_src_reg(ctx
, instr
, tmp_src
, chan
)->flags
|= IR3_REG_HALF
;
1064 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 2;
1067 instr
= instr_create(ctx
, 1, 0);
1068 instr
->cat1
.src_type
= TYPE_S16
;
1069 instr
->cat1
.dst_type
= TYPE_S16
;
1070 add_dst_reg(ctx
, instr
, dst
, 0)->flags
|= IR3_REG_HALF
;
1071 add_src_reg(ctx
, instr
, tmp_src
, chan
)->flags
|= IR3_REG_HALF
;
1075 * texture fetch/sample instructions:
1080 unsigned src_wrmask
, flags
;
1083 static const struct tex_info
*
1084 get_tex_info(struct ir3_compile_context
*ctx
,
1085 struct tgsi_full_instruction
*inst
)
1087 static const struct tex_info tex1d
= {
1088 .order
= { 0, -1, -1, -1 }, /* coord.x */
1089 .src_wrmask
= TGSI_WRITEMASK_XY
,
1092 static const struct tex_info tex1ds
= {
1093 .order
= { 0, -1, 2, -1 }, /* coord.xz */
1094 .src_wrmask
= TGSI_WRITEMASK_XYZ
,
1095 .flags
= IR3_INSTR_S
,
1097 static const struct tex_info tex1da
= {
1098 .order
= { 0, -1, 1, -1 }, /* coord.xy */
1099 .src_wrmask
= TGSI_WRITEMASK_XYZ
,
1100 .flags
= IR3_INSTR_A
,
1102 static const struct tex_info tex1dsa
= {
1103 .order
= { 0, -1, 1, 2 }, /* coord.xyz */
1104 .src_wrmask
= TGSI_WRITEMASK_XYZW
,
1105 .flags
= IR3_INSTR_S
| IR3_INSTR_A
,
1107 static const struct tex_info tex2d
= {
1108 .order
= { 0, 1, -1, -1 }, /* coord.xy */
1109 .src_wrmask
= TGSI_WRITEMASK_XY
,
1112 static const struct tex_info tex2ds
= {
1113 .order
= { 0, 1, 2, -1 }, /* coord.xyz */
1114 .src_wrmask
= TGSI_WRITEMASK_XYZ
,
1115 .flags
= IR3_INSTR_S
,
1117 static const struct tex_info tex2da
= {
1118 .order
= { 0, 1, 2, -1 }, /* coord.xyz */
1119 .src_wrmask
= TGSI_WRITEMASK_XYZ
,
1120 .flags
= IR3_INSTR_A
,
1122 static const struct tex_info tex2dsa
= {
1123 .order
= { 0, 1, 2, 3 }, /* coord.xyzw */
1124 .src_wrmask
= TGSI_WRITEMASK_XYZW
,
1125 .flags
= IR3_INSTR_S
| IR3_INSTR_A
,
1127 static const struct tex_info tex3d
= {
1128 .order
= { 0, 1, 2, -1 }, /* coord.xyz */
1129 .src_wrmask
= TGSI_WRITEMASK_XYZ
,
1130 .flags
= IR3_INSTR_3D
,
1132 static const struct tex_info tex3ds
= {
1133 .order
= { 0, 1, 2, 3 }, /* coord.xyzw */
1134 .src_wrmask
= TGSI_WRITEMASK_XYZW
,
1135 .flags
= IR3_INSTR_S
| IR3_INSTR_3D
,
1137 static const struct tex_info txp1d
= {
1138 .order
= { 0, -1, 3, -1 }, /* coord.xw */
1139 .src_wrmask
= TGSI_WRITEMASK_XYZ
,
1140 .flags
= IR3_INSTR_P
,
1142 static const struct tex_info txp1ds
= {
1143 .order
= { 0, -1, 2, 3 }, /* coord.xyz */
1144 .src_wrmask
= TGSI_WRITEMASK_XYZW
,
1145 .flags
= IR3_INSTR_P
| IR3_INSTR_S
,
1147 static const struct tex_info txp2d
= {
1148 .order
= { 0, 1, 3, -1 }, /* coord.xyw */
1149 .src_wrmask
= TGSI_WRITEMASK_XYZ
,
1150 .flags
= IR3_INSTR_P
,
1152 static const struct tex_info txp2ds
= {
1153 .order
= { 0, 1, 2, 3 }, /* coord.xyzw */
1154 .src_wrmask
= TGSI_WRITEMASK_XYZW
,
1155 .flags
= IR3_INSTR_P
| IR3_INSTR_S
,
1157 static const struct tex_info txp3d
= {
1158 .order
= { 0, 1, 2, 3 }, /* coord.xyzw */
1159 .src_wrmask
= TGSI_WRITEMASK_XYZW
,
1160 .flags
= IR3_INSTR_P
| IR3_INSTR_3D
,
1163 unsigned tex
= inst
->Texture
.Texture
;
1165 switch (inst
->Instruction
.Opcode
) {
1166 case TGSI_OPCODE_TEX
:
1167 case TGSI_OPCODE_TXB
:
1168 case TGSI_OPCODE_TXL
:
1170 case TGSI_TEXTURE_1D
:
1172 case TGSI_TEXTURE_SHADOW1D
:
1174 case TGSI_TEXTURE_1D_ARRAY
:
1176 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1178 case TGSI_TEXTURE_2D
:
1179 case TGSI_TEXTURE_RECT
:
1181 case TGSI_TEXTURE_SHADOW2D
:
1182 case TGSI_TEXTURE_SHADOWRECT
:
1184 case TGSI_TEXTURE_2D_ARRAY
:
1186 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1188 case TGSI_TEXTURE_3D
:
1189 case TGSI_TEXTURE_CUBE
:
1191 case TGSI_TEXTURE_SHADOWCUBE
:
1194 compile_error(ctx
, "unknown texture type: %s\n",
1195 tgsi_texture_names
[tex
]);
1199 case TGSI_OPCODE_TXP
:
1201 case TGSI_TEXTURE_1D
:
1203 case TGSI_TEXTURE_SHADOW1D
:
1205 case TGSI_TEXTURE_2D
:
1206 case TGSI_TEXTURE_RECT
:
1208 case TGSI_TEXTURE_SHADOW2D
:
1209 case TGSI_TEXTURE_SHADOWRECT
:
1211 case TGSI_TEXTURE_3D
:
1212 case TGSI_TEXTURE_CUBE
:
1215 compile_error(ctx
, "unknown texture type: %s\n",
1216 tgsi_texture_names
[tex
]);
1221 compile_assert(ctx
, 0);
/* Check whether the src's swizzle already reads components in the
 * successive order the sample instruction needs: each used channel i
 * must read exactly order[i] steps past channel 0's component.
 */
static bool check_swiz(struct tgsi_src_register *src, const int8_t order[4])
{
	unsigned i;
	for (i = 1; (i < 4) && order[i] >= 0; i++)
		if (src_swiz(src, i) != (src_swiz(src, 0) + order[i]))
			return false;
	return true;
}
1234 static bool is_1d(unsigned tex
)
1237 case TGSI_TEXTURE_1D
:
1238 case TGSI_TEXTURE_SHADOW1D
:
1239 case TGSI_TEXTURE_1D_ARRAY
:
1240 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1247 static struct tgsi_src_register
*
1248 get_tex_coord(struct ir3_compile_context
*ctx
,
1249 struct tgsi_full_instruction
*inst
,
1250 const struct tex_info
*tinf
)
1252 struct tgsi_src_register
*coord
= &inst
->Src
[0].Register
;
1253 struct ir3_instruction
*instr
;
1254 unsigned tex
= inst
->Texture
.Texture
;
1255 bool needs_mov
= false;
1257 /* cat5 instruction cannot seem to handle const or relative: */
1258 if (is_rel_or_const(coord
))
1261 /* 1D textures we fix up w/ 0.5 as 2nd coord: */
1265 /* The texture sample instructions need to coord in successive
1266 * registers/components (ie. src.xy but not src.yx). And TXP
1267 * needs the .w component in .z for 2D.. so in some cases we
1268 * might need to emit some mov instructions to shuffle things
1272 needs_mov
= !check_swiz(coord
, tinf
->order
);
1275 struct tgsi_dst_register tmp_dst
;
1276 struct tgsi_src_register
*tmp_src
;
1279 type_t type_mov
= get_ftype(ctx
);
1281 /* need to move things around: */
1282 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1284 for (j
= 0; j
< 4; j
++) {
1285 if (tinf
->order
[j
] < 0)
1287 instr
= instr_create(ctx
, 1, 0); /* mov */
1288 instr
->cat1
.src_type
= type_mov
;
1289 instr
->cat1
.dst_type
= type_mov
;
1290 add_dst_reg(ctx
, instr
, &tmp_dst
, j
);
1291 add_src_reg(ctx
, instr
, coord
,
1292 src_swiz(coord
, tinf
->order
[j
]));
1295 /* fix up .y coord: */
1297 instr
= instr_create(ctx
, 1, 0); /* mov */
1298 instr
->cat1
.src_type
= type_mov
;
1299 instr
->cat1
.dst_type
= type_mov
;
1300 add_dst_reg(ctx
, instr
, &tmp_dst
, 1); /* .y */
1301 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->fim_val
= 0.5;
1311 trans_samp(const struct instr_translater
*t
,
1312 struct ir3_compile_context
*ctx
,
1313 struct tgsi_full_instruction
*inst
)
1315 struct ir3_instruction
*instr
;
1316 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
1317 struct tgsi_src_register
*coord
;
1318 struct tgsi_src_register
*samp
= &inst
->Src
[1].Register
;
1319 const struct tex_info
*tinf
;
1321 tinf
= get_tex_info(ctx
, inst
);
1322 coord
= get_tex_coord(ctx
, inst
, tinf
);
1324 instr
= instr_create(ctx
, 5, t
->opc
);
1325 instr
->cat5
.type
= get_ftype(ctx
);
1326 instr
->cat5
.samp
= samp
->Index
;
1327 instr
->cat5
.tex
= samp
->Index
;
1328 instr
->flags
|= tinf
->flags
;
1330 add_dst_reg_wrmask(ctx
, instr
, dst
, 0, dst
->WriteMask
);
1331 add_src_reg_wrmask(ctx
, instr
, coord
, coord
->SwizzleX
, tinf
->src_wrmask
);
1333 if (t
->opc
!= OPC_SAM
)
1334 add_src_reg_wrmask(ctx
, instr
, coord
, coord
->SwizzleW
, 0x1);
1339 trans_deriv(const struct instr_translater
*t
,
1340 struct ir3_compile_context
*ctx
,
1341 struct tgsi_full_instruction
*inst
)
1343 struct ir3_instruction
*instr
;
1344 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
1345 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1346 static const int8_t order
[4] = {0, 1, 2, 3};
1348 if (!check_swiz(src
, order
)) {
1349 struct tgsi_dst_register tmp_dst
;
1350 struct tgsi_src_register
*tmp_src
;
1352 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1353 create_mov(ctx
, &tmp_dst
, src
);
1358 /* This might be a workaround for hw bug? Blob compiler always
1359 * seems to work two components at a time for dsy/dsx. It does
1360 * actually seem to work in some cases (or at least some piglit
1361 * tests) for four components at a time. But seems more reliable
1362 * to split this into two instructions like the blob compiler
1366 instr
= instr_create(ctx
, 5, t
->opc
);
1367 instr
->cat5
.type
= get_ftype(ctx
);
1368 add_dst_reg_wrmask(ctx
, instr
, dst
, 0, dst
->WriteMask
& 0x3);
1369 add_src_reg_wrmask(ctx
, instr
, src
, 0, dst
->WriteMask
& 0x3);
1371 instr
= instr_create(ctx
, 5, t
->opc
);
1372 instr
->cat5
.type
= get_ftype(ctx
);
1373 add_dst_reg_wrmask(ctx
, instr
, dst
, 2, (dst
->WriteMask
>> 2) & 0x3);
1374 add_src_reg_wrmask(ctx
, instr
, src
, 2, (dst
->WriteMask
>> 2) & 0x3);
1378 * SEQ(a,b) = (a == b) ? 1.0 : 0.0
1379 * cmps.f.eq tmp0, a, b
1380 * cov.u16f16 dst, tmp0
1382 * SNE(a,b) = (a != b) ? 1.0 : 0.0
1383 * cmps.f.ne tmp0, a, b
1384 * cov.u16f16 dst, tmp0
1386 * SGE(a,b) = (a >= b) ? 1.0 : 0.0
1387 * cmps.f.ge tmp0, a, b
1388 * cov.u16f16 dst, tmp0
1390 * SLE(a,b) = (a <= b) ? 1.0 : 0.0
1391 * cmps.f.le tmp0, a, b
1392 * cov.u16f16 dst, tmp0
1394 * SGT(a,b) = (a > b) ? 1.0 : 0.0
1395 * cmps.f.gt tmp0, a, b
1396 * cov.u16f16 dst, tmp0
1398 * SLT(a,b) = (a < b) ? 1.0 : 0.0
1399 * cmps.f.lt tmp0, a, b
1400 * cov.u16f16 dst, tmp0
1402 * CMP(a,b,c) = (a < 0.0) ? b : c
1403 * cmps.f.lt tmp0, a, {0.0}
1404 * sel.b16 dst, b, tmp0, c
1407 trans_cmp(const struct instr_translater
*t
,
1408 struct ir3_compile_context
*ctx
,
1409 struct tgsi_full_instruction
*inst
)
1411 struct ir3_instruction
*instr
;
1412 struct tgsi_dst_register tmp_dst
;
1413 struct tgsi_src_register
*tmp_src
;
1414 struct tgsi_src_register constval0
;
1415 /* final instruction for CMP() uses orig src1 and src2: */
1416 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1417 struct tgsi_src_register
*a0
, *a1
, *a2
;
1420 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1422 a0
= &inst
->Src
[0].Register
; /* a */
1423 a1
= &inst
->Src
[1].Register
; /* b */
1425 switch (t
->tgsi_opc
) {
1426 case TGSI_OPCODE_SEQ
:
1427 case TGSI_OPCODE_FSEQ
:
1428 condition
= IR3_COND_EQ
;
1430 case TGSI_OPCODE_SNE
:
1431 case TGSI_OPCODE_FSNE
:
1432 condition
= IR3_COND_NE
;
1434 case TGSI_OPCODE_SGE
:
1435 case TGSI_OPCODE_FSGE
:
1436 condition
= IR3_COND_GE
;
1438 case TGSI_OPCODE_SLT
:
1439 case TGSI_OPCODE_FSLT
:
1440 condition
= IR3_COND_LT
;
1442 case TGSI_OPCODE_SLE
:
1443 condition
= IR3_COND_LE
;
1445 case TGSI_OPCODE_SGT
:
1446 condition
= IR3_COND_GT
;
1448 case TGSI_OPCODE_CMP
:
1449 get_immediate(ctx
, &constval0
, fui(0.0));
1450 a0
= &inst
->Src
[0].Register
; /* a */
1451 a1
= &constval0
; /* {0.0} */
1452 condition
= IR3_COND_LT
;
1455 compile_assert(ctx
, 0);
1459 if (is_const(a0
) && is_const(a1
))
1460 a0
= get_unconst(ctx
, a0
);
1462 /* cmps.f.<cond> tmp, a0, a1 */
1463 instr
= instr_create(ctx
, 2, OPC_CMPS_F
);
1464 instr
->cat2
.condition
= condition
;
1465 vectorize(ctx
, instr
, &tmp_dst
, 2, a0
, 0, a1
, 0);
1467 switch (t
->tgsi_opc
) {
1468 case TGSI_OPCODE_SEQ
:
1469 case TGSI_OPCODE_SGE
:
1470 case TGSI_OPCODE_SLE
:
1471 case TGSI_OPCODE_SNE
:
1472 case TGSI_OPCODE_SGT
:
1473 case TGSI_OPCODE_SLT
:
1474 /* cov.u16f16 dst, tmp0 */
1475 instr
= instr_create(ctx
, 1, 0);
1476 instr
->cat1
.src_type
= get_utype(ctx
);
1477 instr
->cat1
.dst_type
= get_ftype(ctx
);
1478 vectorize(ctx
, instr
, dst
, 1, tmp_src
, 0);
1480 case TGSI_OPCODE_FSEQ
:
1481 case TGSI_OPCODE_FSGE
:
1482 case TGSI_OPCODE_FSNE
:
1483 case TGSI_OPCODE_FSLT
:
1484 /* absneg.s dst, (neg)tmp0 */
1485 instr
= instr_create(ctx
, 2, OPC_ABSNEG_S
);
1486 vectorize(ctx
, instr
, dst
, 1, tmp_src
, IR3_REG_NEGATE
);
1488 case TGSI_OPCODE_CMP
:
1489 a1
= &inst
->Src
[1].Register
;
1490 a2
= &inst
->Src
[2].Register
;
1491 /* sel.{b32,b16} dst, src2, tmp, src1 */
1492 instr
= instr_create(ctx
, 3, OPC_SEL_B32
);
1493 vectorize(ctx
, instr
, dst
, 3, a1
, 0, tmp_src
, 0, a2
, 0);
1498 put_dst(ctx
, inst
, dst
);
1502 * USNE(a,b) = (a != b) ? ~0 : 0
1503 * cmps.u32.ne dst, a, b
1505 * USEQ(a,b) = (a == b) ? ~0 : 0
1506 * cmps.u32.eq dst, a, b
1508 * ISGE(a,b) = (a > b) ? ~0 : 0
1509 * cmps.s32.ge dst, a, b
1511 * USGE(a,b) = (a > b) ? ~0 : 0
1512 * cmps.u32.ge dst, a, b
1514 * ISLT(a,b) = (a < b) ? ~0 : 0
1515 * cmps.s32.lt dst, a, b
1517 * USLT(a,b) = (a < b) ? ~0 : 0
1518 * cmps.u32.lt dst, a, b
1522 trans_icmp(const struct instr_translater
*t
,
1523 struct ir3_compile_context
*ctx
,
1524 struct tgsi_full_instruction
*inst
)
1526 struct ir3_instruction
*instr
;
1527 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1528 struct tgsi_dst_register tmp_dst
;
1529 struct tgsi_src_register
*tmp_src
;
1530 struct tgsi_src_register
*a0
, *a1
;
1533 a0
= &inst
->Src
[0].Register
; /* a */
1534 a1
= &inst
->Src
[1].Register
; /* b */
1536 switch (t
->tgsi_opc
) {
1537 case TGSI_OPCODE_USNE
:
1538 condition
= IR3_COND_NE
;
1540 case TGSI_OPCODE_USEQ
:
1541 condition
= IR3_COND_EQ
;
1543 case TGSI_OPCODE_ISGE
:
1544 case TGSI_OPCODE_USGE
:
1545 condition
= IR3_COND_GE
;
1547 case TGSI_OPCODE_ISLT
:
1548 case TGSI_OPCODE_USLT
:
1549 condition
= IR3_COND_LT
;
1553 compile_assert(ctx
, 0);
1557 if (is_const(a0
) && is_const(a1
))
1558 a0
= get_unconst(ctx
, a0
);
1560 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1561 /* cmps.{u32,s32}.<cond> tmp, a0, a1 */
1562 instr
= instr_create(ctx
, 2, t
->opc
);
1563 instr
->cat2
.condition
= condition
;
1564 vectorize(ctx
, instr
, &tmp_dst
, 2, a0
, 0, a1
, 0);
1566 /* absneg.s dst, (neg)tmp */
1567 instr
= instr_create(ctx
, 2, OPC_ABSNEG_S
);
1568 vectorize(ctx
, instr
, dst
, 1, tmp_src
, IR3_REG_NEGATE
);
1570 put_dst(ctx
, inst
, dst
);
1574 * UCMP(a,b,c) = a ? b : c
1575 * sel.b16 dst, b, a, c
1578 trans_ucmp(const struct instr_translater
*t
,
1579 struct ir3_compile_context
*ctx
,
1580 struct tgsi_full_instruction
*inst
)
1582 struct ir3_instruction
*instr
;
1583 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1584 struct tgsi_src_register
*a0
, *a1
, *a2
;
1586 a0
= &inst
->Src
[0].Register
; /* a */
1587 a1
= &inst
->Src
[1].Register
; /* b */
1588 a2
= &inst
->Src
[2].Register
; /* c */
1590 if (is_rel_or_const(a0
))
1591 a0
= get_unconst(ctx
, a0
);
1593 /* sel.{b32,b16} dst, b, a, c */
1594 instr
= instr_create(ctx
, 3, OPC_SEL_B32
);
1595 vectorize(ctx
, instr
, dst
, 3, a1
, 0, a0
, 0, a2
, 0);
1596 put_dst(ctx
, inst
, dst
);
1601 * Conditional / Flow control
1605 push_branch(struct ir3_compile_context
*ctx
, bool inv
,
1606 struct ir3_instruction
*instr
, struct ir3_instruction
*cond
)
1608 unsigned int idx
= ctx
->branch_count
++;
1609 compile_assert(ctx
, idx
< ARRAY_SIZE(ctx
->branch
));
1610 ctx
->branch
[idx
].instr
= instr
;
1611 ctx
->branch
[idx
].inv
= inv
;
1612 /* else side of branch has same condition: */
1614 ctx
->branch
[idx
].cond
= cond
;
1617 static struct ir3_instruction
*
1618 pop_branch(struct ir3_compile_context
*ctx
)
1620 unsigned int idx
= --ctx
->branch_count
;
1621 return ctx
->branch
[idx
].instr
;
1625 trans_if(const struct instr_translater
*t
,
1626 struct ir3_compile_context
*ctx
,
1627 struct tgsi_full_instruction
*inst
)
1629 struct ir3_instruction
*instr
, *cond
;
1630 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1631 struct tgsi_dst_register tmp_dst
;
1632 struct tgsi_src_register
*tmp_src
;
1633 struct tgsi_src_register constval
;
1635 get_immediate(ctx
, &constval
, fui(0.0));
1636 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
1639 src
= get_unconst(ctx
, src
);
1641 /* cmps.{f,u}.ne tmp0, b, {0.0} */
1642 instr
= instr_create(ctx
, 2, t
->opc
);
1643 add_dst_reg(ctx
, instr
, &tmp_dst
, 0);
1644 add_src_reg(ctx
, instr
, src
, src
->SwizzleX
);
1645 add_src_reg(ctx
, instr
, &constval
, constval
.SwizzleX
);
1646 instr
->cat2
.condition
= IR3_COND_NE
;
1648 compile_assert(ctx
, instr
->regs
[1]->flags
& IR3_REG_SSA
); /* because get_unconst() */
1649 cond
= instr
->regs
[1]->instr
;
1651 /* meta:flow tmp0 */
1652 instr
= instr_create(ctx
, -1, OPC_META_FLOW
);
1653 ir3_reg_create(instr
, 0, 0); /* dummy dst */
1654 add_src_reg(ctx
, instr
, tmp_src
, TGSI_SWIZZLE_X
);
1656 push_branch(ctx
, false, instr
, cond
);
1657 instr
->flow
.if_block
= push_block(ctx
);
1661 trans_else(const struct instr_translater
*t
,
1662 struct ir3_compile_context
*ctx
,
1663 struct tgsi_full_instruction
*inst
)
1665 struct ir3_instruction
*instr
;
1669 instr
= pop_branch(ctx
);
1671 compile_assert(ctx
, (instr
->category
== -1) &&
1672 (instr
->opc
== OPC_META_FLOW
));
1674 push_branch(ctx
, true, instr
, NULL
);
1675 instr
->flow
.else_block
= push_block(ctx
);
1678 static struct ir3_instruction
*
1679 find_temporary(struct ir3_block
*block
, unsigned n
)
1681 if (block
->parent
&& !block
->temporaries
[n
])
1682 return find_temporary(block
->parent
, n
);
1683 return block
->temporaries
[n
];
1686 static struct ir3_instruction
*
1687 find_output(struct ir3_block
*block
, unsigned n
)
1689 if (block
->parent
&& !block
->outputs
[n
])
1690 return find_output(block
->parent
, n
);
1691 return block
->outputs
[n
];
1694 static struct ir3_instruction
*
1695 create_phi(struct ir3_compile_context
*ctx
, struct ir3_instruction
*cond
,
1696 struct ir3_instruction
*a
, struct ir3_instruction
*b
)
1698 struct ir3_instruction
*phi
;
1700 compile_assert(ctx
, cond
);
1702 /* Either side of the condition could be null.. which
1703 * indicates a variable written on only one side of the
1704 * branch. Normally this should only be variables not
1705 * used outside of that side of the branch. So we could
1706 * just 'return a ? a : b;' in that case. But for better
1707 * defined undefined behavior we just stick in imm{0.0}.
1708 * In the common case of a value only used within the
1709 * one side of the branch, the PHI instruction will not
1713 a
= create_immed(ctx
, 0.0);
1715 b
= create_immed(ctx
, 0.0);
1717 phi
= instr_create(ctx
, -1, OPC_META_PHI
);
1718 ir3_reg_create(phi
, 0, 0); /* dummy dst */
1719 ir3_reg_create(phi
, 0, IR3_REG_SSA
)->instr
= cond
;
1720 ir3_reg_create(phi
, 0, IR3_REG_SSA
)->instr
= a
;
1721 ir3_reg_create(phi
, 0, IR3_REG_SSA
)->instr
= b
;
1727 trans_endif(const struct instr_translater
*t
,
1728 struct ir3_compile_context
*ctx
,
1729 struct tgsi_full_instruction
*inst
)
1731 struct ir3_instruction
*instr
;
1732 struct ir3_block
*ifb
, *elseb
;
1733 struct ir3_instruction
**ifout
, **elseout
;
1734 unsigned i
, ifnout
= 0, elsenout
= 0;
1738 instr
= pop_branch(ctx
);
1740 compile_assert(ctx
, (instr
->category
== -1) &&
1741 (instr
->opc
== OPC_META_FLOW
));
1743 ifb
= instr
->flow
.if_block
;
1744 elseb
= instr
->flow
.else_block
;
1745 /* if there is no else block, the parent block is used for the
1746 * branch-not-taken src of the PHI instructions:
1749 elseb
= ifb
->parent
;
1751 /* worst case sizes: */
1752 ifnout
= ifb
->ntemporaries
+ ifb
->noutputs
;
1753 elsenout
= elseb
->ntemporaries
+ elseb
->noutputs
;
1755 ifout
= ir3_alloc(ctx
->ir
, sizeof(ifb
->outputs
[0]) * ifnout
);
1756 if (elseb
!= ifb
->parent
)
1757 elseout
= ir3_alloc(ctx
->ir
, sizeof(ifb
->outputs
[0]) * elsenout
);
1762 /* generate PHI instructions for any temporaries written: */
1763 for (i
= 0; i
< ifb
->ntemporaries
; i
++) {
1764 struct ir3_instruction
*a
= ifb
->temporaries
[i
];
1765 struct ir3_instruction
*b
= elseb
->temporaries
[i
];
1767 /* if temporary written in if-block, or if else block
1768 * is present and temporary written in else-block:
1770 if (a
|| ((elseb
!= ifb
->parent
) && b
)) {
1771 struct ir3_instruction
*phi
;
1773 /* if only written on one side, find the closest
1774 * enclosing update on other side:
1777 a
= find_temporary(ifb
, i
);
1779 b
= find_temporary(elseb
, i
);
1782 a
= create_output(ifb
, a
, ifnout
++);
1784 if (elseb
!= ifb
->parent
) {
1785 elseout
[elsenout
] = b
;
1786 b
= create_output(elseb
, b
, elsenout
++);
1789 phi
= create_phi(ctx
, instr
, a
, b
);
1790 ctx
->block
->temporaries
[i
] = phi
;
1794 compile_assert(ctx
, ifb
->noutputs
== elseb
->noutputs
);
1796 /* .. and any outputs written: */
1797 for (i
= 0; i
< ifb
->noutputs
; i
++) {
1798 struct ir3_instruction
*a
= ifb
->outputs
[i
];
1799 struct ir3_instruction
*b
= elseb
->outputs
[i
];
1801 /* if output written in if-block, or if else block
1802 * is present and output written in else-block:
1804 if (a
|| ((elseb
!= ifb
->parent
) && b
)) {
1805 struct ir3_instruction
*phi
;
1807 /* if only written on one side, find the closest
1808 * enclosing update on other side:
1811 a
= find_output(ifb
, i
);
1813 b
= find_output(elseb
, i
);
1816 a
= create_output(ifb
, a
, ifnout
++);
1818 if (elseb
!= ifb
->parent
) {
1819 elseout
[elsenout
] = b
;
1820 b
= create_output(elseb
, b
, elsenout
++);
1823 phi
= create_phi(ctx
, instr
, a
, b
);
1824 ctx
->block
->outputs
[i
] = phi
;
1828 ifb
->noutputs
= ifnout
;
1829 ifb
->outputs
= ifout
;
1831 if (elseb
!= ifb
->parent
) {
1832 elseb
->noutputs
= elsenout
;
1833 elseb
->outputs
= elseout
;
1836 // TODO maybe we want to compact block->inputs?
1844 trans_kill(const struct instr_translater
*t
,
1845 struct ir3_compile_context
*ctx
,
1846 struct tgsi_full_instruction
*inst
)
1848 struct ir3_instruction
*instr
, *immed
, *cond
= NULL
;
1851 switch (t
->tgsi_opc
) {
1852 case TGSI_OPCODE_KILL
:
1853 /* unconditional kill, use enclosing if condition: */
1854 if (ctx
->branch_count
> 0) {
1855 unsigned int idx
= ctx
->branch_count
- 1;
1856 cond
= ctx
->branch
[idx
].cond
;
1857 inv
= ctx
->branch
[idx
].inv
;
1859 cond
= create_immed(ctx
, 1.0);
1865 compile_assert(ctx
, cond
);
1867 immed
= create_immed(ctx
, 0.0);
1869 /* cmps.f.ne p0.x, cond, {0.0} */
1870 instr
= instr_create(ctx
, 2, OPC_CMPS_F
);
1871 instr
->cat2
.condition
= IR3_COND_NE
;
1872 ir3_reg_create(instr
, regid(REG_P0
, 0), 0);
1873 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= cond
;
1874 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= immed
;
1878 instr
= instr_create(ctx
, 0, OPC_KILL
);
1879 instr
->cat0
.inv
= inv
;
1880 ir3_reg_create(instr
, 0, 0); /* dummy dst */
1881 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= cond
;
1883 ctx
->kill
[ctx
->kill_count
++] = instr
;
1891 trans_killif(const struct instr_translater
*t
,
1892 struct ir3_compile_context
*ctx
,
1893 struct tgsi_full_instruction
*inst
)
1895 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1896 struct ir3_instruction
*instr
, *immed
, *cond
= NULL
;
1899 immed
= create_immed(ctx
, 0.0);
1901 /* cmps.f.ne p0.x, cond, {0.0} */
1902 instr
= instr_create(ctx
, 2, OPC_CMPS_F
);
1903 instr
->cat2
.condition
= IR3_COND_NE
;
1904 ir3_reg_create(instr
, regid(REG_P0
, 0), 0);
1905 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= immed
;
1906 add_src_reg(ctx
, instr
, src
, src
->SwizzleX
);
1911 instr
= instr_create(ctx
, 0, OPC_KILL
);
1912 instr
->cat0
.inv
= inv
;
1913 ir3_reg_create(instr
, 0, 0); /* dummy dst */
1914 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= cond
;
1916 ctx
->kill
[ctx
->kill_count
++] = instr
;
1920 * I2F / U2F / F2I / F2U
1924 trans_cov(const struct instr_translater
*t
,
1925 struct ir3_compile_context
*ctx
,
1926 struct tgsi_full_instruction
*inst
)
1928 struct ir3_instruction
*instr
;
1929 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1930 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1932 // cov.f32s32 dst, tmp0 /
1933 instr
= instr_create(ctx
, 1, 0);
1934 switch (t
->tgsi_opc
) {
1935 case TGSI_OPCODE_U2F
:
1936 instr
->cat1
.src_type
= TYPE_U32
;
1937 instr
->cat1
.dst_type
= TYPE_F32
;
1939 case TGSI_OPCODE_I2F
:
1940 instr
->cat1
.src_type
= TYPE_S32
;
1941 instr
->cat1
.dst_type
= TYPE_F32
;
1943 case TGSI_OPCODE_F2U
:
1944 instr
->cat1
.src_type
= TYPE_F32
;
1945 instr
->cat1
.dst_type
= TYPE_U32
;
1947 case TGSI_OPCODE_F2I
:
1948 instr
->cat1
.src_type
= TYPE_F32
;
1949 instr
->cat1
.dst_type
= TYPE_S32
;
1953 vectorize(ctx
, instr
, dst
, 1, src
, 0);
1954 put_dst(ctx
, inst
, dst
);
1960 * There is no 32-bit multiply instruction, so splitting a and b into high and
1961 * low components, we get that
1963 * dst = al * bl + ah * bl << 16 + al * bh << 16
1965 * mull.u tmp0, a, b (mul low, i.e. al * bl)
1966 * madsh.m16 tmp1, a, b, tmp0 (mul-add shift high mix, i.e. ah * bl << 16)
1967 * madsh.m16 dst, b, a, tmp1 (i.e. al * bh << 16)
1970 trans_umul(const struct instr_translater
*t
,
1971 struct ir3_compile_context
*ctx
,
1972 struct tgsi_full_instruction
*inst
)
1974 struct ir3_instruction
*instr
;
1975 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1976 struct tgsi_src_register
*a
= &inst
->Src
[0].Register
;
1977 struct tgsi_src_register
*b
= &inst
->Src
[1].Register
;
1979 struct tgsi_dst_register tmp0_dst
, tmp1_dst
;
1980 struct tgsi_src_register
*tmp0_src
, *tmp1_src
;
1982 tmp0_src
= get_internal_temp(ctx
, &tmp0_dst
);
1983 tmp1_src
= get_internal_temp(ctx
, &tmp1_dst
);
1985 if (is_rel_or_const(a
))
1986 a
= get_unconst(ctx
, a
);
1987 if (is_rel_or_const(b
))
1988 b
= get_unconst(ctx
, b
);
1990 /* mull.u tmp0, a, b */
1991 instr
= instr_create(ctx
, 2, OPC_MULL_U
);
1992 vectorize(ctx
, instr
, &tmp0_dst
, 2, a
, 0, b
, 0);
1994 /* madsh.m16 tmp1, a, b, tmp0 */
1995 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
1996 vectorize(ctx
, instr
, &tmp1_dst
, 3, a
, 0, b
, 0, tmp0_src
, 0);
1998 /* madsh.m16 dst, b, a, tmp1 */
1999 instr
= instr_create(ctx
, 3, OPC_MADSH_M16
);
2000 vectorize(ctx
, instr
, dst
, 3, b
, 0, a
, 0, tmp1_src
, 0);
2001 put_dst(ctx
, inst
, dst
);
2005 * Handlers for TGSI instructions which do have 1:1 mapping to native
2010 instr_cat0(const struct instr_translater
*t
,
2011 struct ir3_compile_context
*ctx
,
2012 struct tgsi_full_instruction
*inst
)
2014 instr_create(ctx
, 0, t
->opc
);
2018 instr_cat1(const struct instr_translater
*t
,
2019 struct ir3_compile_context
*ctx
,
2020 struct tgsi_full_instruction
*inst
)
2022 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
2023 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
2024 create_mov(ctx
, dst
, src
);
2025 put_dst(ctx
, inst
, dst
);
/* instr_cat2() -- emit a native category-2 (ALU) instruction for a TGSI op,
 * mapping ABS/IABS/INEG/SUB onto src abs/negate modifier flags.
 *
 * NOTE(review): this extraction is missing original lines 2049-2066 (the
 * "switch (t->opc)" header and the case-label list that selects the
 * single-src-reg path) -- the surviving text below is preserved
 * byte-for-byte; do not treat it as complete.
 */
2029 instr_cat2(const struct instr_translater
*t
,
2030 struct ir3_compile_context
*ctx
,
2031 struct tgsi_full_instruction
*inst
)
/* resolve dst (possibly via an internal temp -- see get_dst/put_dst): */
2033 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
2034 struct tgsi_src_register
*src0
= &inst
->Src
[0].Register
;
2035 struct tgsi_src_register
*src1
= &inst
->Src
[1].Register
;
2036 struct ir3_instruction
*instr
;
/* abs/neg modifier flags to apply to src0/src1: */
2037 unsigned src0_flags
= 0, src1_flags
= 0;
/* TGSI ops that are really a cat2 op plus a src modifier: */
2039 switch (t
->tgsi_opc
) {
2040 case TGSI_OPCODE_ABS
:
2041 case TGSI_OPCODE_IABS
:
2042 src0_flags
= IR3_REG_ABS
;
2044 case TGSI_OPCODE_INEG
:
2045 src0_flags
= IR3_REG_NEGATE
;
/* SUB is emitted as ADD with negated second operand: */
2047 case TGSI_OPCODE_SUB
:
2048 src1_flags
= IR3_REG_NEGATE
;
/* (missing here: switch (t->opc) selecting unary vs binary emit path) */
2067 /* these only have one src reg */
2068 instr
= instr_create(ctx
, 2, t
->opc
);
2069 vectorize(ctx
, instr
, dst
, 1, src0
, src0_flags
);
/* binary path: cat2 cannot take two const srcs, so move one to a temp: */
2072 if (is_const(src0
) && is_const(src1
))
2073 src0
= get_unconst(ctx
, src0
);
2075 instr
= instr_create(ctx
, 2, t
->opc
);
2076 vectorize(ctx
, instr
, dst
, 2, src0
, src0_flags
,
2081 put_dst(ctx
, inst
, dst
);
2085 instr_cat3(const struct instr_translater
*t
,
2086 struct ir3_compile_context
*ctx
,
2087 struct tgsi_full_instruction
*inst
)
2089 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
2090 struct tgsi_src_register
*src0
= &inst
->Src
[0].Register
;
2091 struct tgsi_src_register
*src1
= &inst
->Src
[1].Register
;
2092 struct ir3_instruction
*instr
;
2094 /* in particular, can't handle const for src1 for cat3..
2095 * for mad, we can swap first two src's if needed:
2097 if (is_rel_or_const(src1
)) {
2098 if (is_mad(t
->opc
) && !is_rel_or_const(src0
)) {
2099 struct tgsi_src_register
*tmp
;
2104 src1
= get_unconst(ctx
, src1
);
2108 instr
= instr_create(ctx
, 3, t
->opc
);
2109 vectorize(ctx
, instr
, dst
, 3, src0
, 0, src1
, 0,
2110 &inst
->Src
[2].Register
, 0);
2111 put_dst(ctx
, inst
, dst
);
2115 instr_cat4(const struct instr_translater
*t
,
2116 struct ir3_compile_context
*ctx
,
2117 struct tgsi_full_instruction
*inst
)
2119 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
2120 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
2121 struct ir3_instruction
*instr
;
2124 /* seems like blob compiler avoids const as src.. */
2126 src
= get_unconst(ctx
, src
);
2128 /* we need to replicate into each component: */
2129 for (i
= 0; i
< 4; i
++) {
2130 if (dst
->WriteMask
& (1 << i
)) {
2131 instr
= instr_create(ctx
, 4, t
->opc
);
2132 add_dst_reg(ctx
, instr
, dst
, i
);
2133 add_src_reg(ctx
, instr
, src
, src
->SwizzleX
);
2137 put_dst(ctx
, inst
, dst
);
2140 static const struct instr_translater translaters
[TGSI_OPCODE_LAST
] = {
2141 #define INSTR(n, f, ...) \
2142 [TGSI_OPCODE_ ## n] = { .fxn = (f), .tgsi_opc = TGSI_OPCODE_ ## n, ##__VA_ARGS__ }
2144 INSTR(MOV
, instr_cat1
),
2145 INSTR(RCP
, instr_cat4
, .opc
= OPC_RCP
),
2146 INSTR(RSQ
, instr_cat4
, .opc
= OPC_RSQ
),
2147 INSTR(SQRT
, instr_cat4
, .opc
= OPC_SQRT
),
2148 INSTR(MUL
, instr_cat2
, .opc
= OPC_MUL_F
),
2149 INSTR(ADD
, instr_cat2
, .opc
= OPC_ADD_F
),
2150 INSTR(SUB
, instr_cat2
, .opc
= OPC_ADD_F
),
2151 INSTR(MIN
, instr_cat2
, .opc
= OPC_MIN_F
),
2152 INSTR(MAX
, instr_cat2
, .opc
= OPC_MAX_F
),
2153 INSTR(UADD
, instr_cat2
, .opc
= OPC_ADD_U
),
2154 INSTR(IMIN
, instr_cat2
, .opc
= OPC_MIN_S
),
2155 INSTR(UMIN
, instr_cat2
, .opc
= OPC_MIN_U
),
2156 INSTR(IMAX
, instr_cat2
, .opc
= OPC_MAX_S
),
2157 INSTR(UMAX
, instr_cat2
, .opc
= OPC_MAX_U
),
2158 INSTR(AND
, instr_cat2
, .opc
= OPC_AND_B
),
2159 INSTR(OR
, instr_cat2
, .opc
= OPC_OR_B
),
2160 INSTR(NOT
, instr_cat2
, .opc
= OPC_NOT_B
),
2161 INSTR(XOR
, instr_cat2
, .opc
= OPC_XOR_B
),
2162 INSTR(UMUL
, trans_umul
),
2163 INSTR(SHL
, instr_cat2
, .opc
= OPC_SHL_B
),
2164 INSTR(USHR
, instr_cat2
, .opc
= OPC_SHR_B
),
2165 INSTR(ISHR
, instr_cat2
, .opc
= OPC_ASHR_B
),
2166 INSTR(IABS
, instr_cat2
, .opc
= OPC_ABSNEG_S
),
2167 INSTR(INEG
, instr_cat2
, .opc
= OPC_ABSNEG_S
),
2168 INSTR(AND
, instr_cat2
, .opc
= OPC_AND_B
),
2169 INSTR(MAD
, instr_cat3
, .opc
= OPC_MAD_F32
, .hopc
= OPC_MAD_F16
),
2170 INSTR(TRUNC
, instr_cat2
, .opc
= OPC_TRUNC_F
),
2171 INSTR(CLAMP
, trans_clamp
),
2172 INSTR(FLR
, instr_cat2
, .opc
= OPC_FLOOR_F
),
2173 INSTR(ROUND
, instr_cat2
, .opc
= OPC_RNDNE_F
),
2174 INSTR(SSG
, instr_cat2
, .opc
= OPC_SIGN_F
),
2175 INSTR(CEIL
, instr_cat2
, .opc
= OPC_CEIL_F
),
2176 INSTR(ARL
, trans_arl
),
2177 INSTR(UARL
, trans_arl
),
2178 INSTR(EX2
, instr_cat4
, .opc
= OPC_EXP2
),
2179 INSTR(LG2
, instr_cat4
, .opc
= OPC_LOG2
),
2180 INSTR(ABS
, instr_cat2
, .opc
= OPC_ABSNEG_F
),
2181 INSTR(COS
, instr_cat4
, .opc
= OPC_COS
),
2182 INSTR(SIN
, instr_cat4
, .opc
= OPC_SIN
),
2183 INSTR(TEX
, trans_samp
, .opc
= OPC_SAM
, .arg
= TGSI_OPCODE_TEX
),
2184 INSTR(TXP
, trans_samp
, .opc
= OPC_SAM
, .arg
= TGSI_OPCODE_TXP
),
2185 INSTR(TXB
, trans_samp
, .opc
= OPC_SAMB
, .arg
= TGSI_OPCODE_TXB
),
2186 INSTR(TXL
, trans_samp
, .opc
= OPC_SAML
, .arg
= TGSI_OPCODE_TXL
),
2187 INSTR(DDX
, trans_deriv
, .opc
= OPC_DSX
),
2188 INSTR(DDY
, trans_deriv
, .opc
= OPC_DSY
),
2189 INSTR(SGT
, trans_cmp
),
2190 INSTR(SLT
, trans_cmp
),
2191 INSTR(FSLT
, trans_cmp
),
2192 INSTR(SGE
, trans_cmp
),
2193 INSTR(FSGE
, trans_cmp
),
2194 INSTR(SLE
, trans_cmp
),
2195 INSTR(SNE
, trans_cmp
),
2196 INSTR(FSNE
, trans_cmp
),
2197 INSTR(SEQ
, trans_cmp
),
2198 INSTR(FSEQ
, trans_cmp
),
2199 INSTR(CMP
, trans_cmp
),
2200 INSTR(USNE
, trans_icmp
, .opc
= OPC_CMPS_U
),
2201 INSTR(USEQ
, trans_icmp
, .opc
= OPC_CMPS_U
),
2202 INSTR(ISGE
, trans_icmp
, .opc
= OPC_CMPS_S
),
2203 INSTR(USGE
, trans_icmp
, .opc
= OPC_CMPS_U
),
2204 INSTR(ISLT
, trans_icmp
, .opc
= OPC_CMPS_S
),
2205 INSTR(USLT
, trans_icmp
, .opc
= OPC_CMPS_U
),
2206 INSTR(UCMP
, trans_ucmp
),
2207 INSTR(IF
, trans_if
, .opc
= OPC_CMPS_F
),
2208 INSTR(UIF
, trans_if
, .opc
= OPC_CMPS_U
),
2209 INSTR(ELSE
, trans_else
),
2210 INSTR(ENDIF
, trans_endif
),
2211 INSTR(END
, instr_cat0
, .opc
= OPC_END
),
2212 INSTR(KILL
, trans_kill
, .opc
= OPC_KILL
),
2213 INSTR(KILL_IF
, trans_killif
, .opc
= OPC_KILL
),
2214 INSTR(I2F
, trans_cov
),
2215 INSTR(U2F
, trans_cov
),
2216 INSTR(F2I
, trans_cov
),
2217 INSTR(F2U
, trans_cov
),
2221 decl_semantic(const struct tgsi_declaration_semantic
*sem
)
2223 return ir3_semantic_name(sem
->Name
, sem
->Index
);
2226 static struct ir3_instruction
*
2227 decl_in_frag_bary(struct ir3_compile_context
*ctx
, unsigned regid
,
2228 unsigned j
, unsigned inloc
)
2230 struct ir3_instruction
*instr
;
2231 struct ir3_register
*src
;
2233 /* bary.f dst, #inloc, r0.x */
2234 instr
= instr_create(ctx
, 2, OPC_BARY_F
);
2235 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2236 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= inloc
;
2237 src
= ir3_reg_create(instr
, 0, IR3_REG_SSA
);
2239 src
->instr
= ctx
->frag_pos
;
2244 /* TGSI_SEMANTIC_POSITION
2245 * """"""""""""""""""""""
2247 * For fragment shaders, TGSI_SEMANTIC_POSITION is used to indicate that
2248 * fragment shader input contains the fragment's window position. The X
2249 * component starts at zero and always increases from left to right.
2250 * The Y component starts at zero and always increases but Y=0 may either
2251 * indicate the top of the window or the bottom depending on the fragment
2252 * coordinate origin convention (see TGSI_PROPERTY_FS_COORD_ORIGIN).
2253 * The Z coordinate ranges from 0 to 1 to represent depth from the front
2254 * to the back of the Z buffer. The W component contains the reciprocol
2255 * of the interpolated vertex position W component.
2257 static struct ir3_instruction
*
2258 decl_in_frag_coord(struct ir3_compile_context
*ctx
, unsigned regid
,
2261 struct ir3_instruction
*instr
, *src
;
2263 compile_assert(ctx
, !ctx
->frag_coord
[j
]);
2265 ctx
->frag_coord
[j
] = create_input(ctx
->block
, NULL
, 0);
2271 /* for frag_coord, we get unsigned values.. we need
2272 * to subtract (integer) 8 and divide by 16 (right-
2273 * shift by 4) then convert to float:
2276 /* add.s tmp, src, -8 */
2277 instr
= instr_create(ctx
, 2, OPC_ADD_S
);
2278 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2279 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= ctx
->frag_coord
[j
];
2280 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= -8;
2283 /* shr.b tmp, tmp, 4 */
2284 instr
= instr_create(ctx
, 2, OPC_SHR_B
);
2285 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2286 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= src
;
2287 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 4;
2290 /* mov.u32f32 dst, tmp */
2291 instr
= instr_create(ctx
, 1, 0);
2292 instr
->cat1
.src_type
= TYPE_U32
;
2293 instr
->cat1
.dst_type
= TYPE_F32
;
2294 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2295 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= src
;
2300 /* seems that we can use these as-is: */
2301 instr
= ctx
->frag_coord
[j
];
2304 compile_error(ctx
, "invalid channel\n");
2305 instr
= create_immed(ctx
, 0.0);
2312 /* TGSI_SEMANTIC_FACE
2313 * """"""""""""""""""
2315 * This label applies to fragment shader inputs only and indicates that
2316 * the register contains front/back-face information of the form (F, 0,
2317 * 0, 1). The first component will be positive when the fragment belongs
2318 * to a front-facing polygon, and negative when the fragment belongs to a
2319 * back-facing polygon.
2321 static struct ir3_instruction
*
2322 decl_in_frag_face(struct ir3_compile_context
*ctx
, unsigned regid
,
2325 struct ir3_instruction
*instr
, *src
;
2329 compile_assert(ctx
, !ctx
->frag_face
);
2331 ctx
->frag_face
= create_input(ctx
->block
, NULL
, 0);
2333 /* for faceness, we always get -1 or 0 (int).. but TGSI expects
2334 * positive vs negative float.. and piglit further seems to
2335 * expect -1.0 or 1.0:
2337 * mul.s tmp, hr0.x, 2
2339 * mov.s16f32, dst, tmp
2343 instr
= instr_create(ctx
, 2, OPC_MUL_S
);
2344 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2345 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= ctx
->frag_face
;
2346 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 2;
2349 instr
= instr_create(ctx
, 2, OPC_ADD_S
);
2350 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2351 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= src
;
2352 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 1;
2355 instr
= instr_create(ctx
, 1, 0); /* mov */
2356 instr
->cat1
.src_type
= TYPE_S32
;
2357 instr
->cat1
.dst_type
= TYPE_F32
;
2358 ir3_reg_create(instr
, regid
, 0); /* dummy dst */
2359 ir3_reg_create(instr
, 0, IR3_REG_SSA
)->instr
= src
;
2364 instr
= create_immed(ctx
, 0.0);
2367 instr
= create_immed(ctx
, 1.0);
2370 compile_error(ctx
, "invalid channel\n");
2371 instr
= create_immed(ctx
, 0.0);
2379 decl_in(struct ir3_compile_context
*ctx
, struct tgsi_full_declaration
*decl
)
2381 struct ir3_shader_variant
*so
= ctx
->so
;
2382 unsigned name
= decl
->Semantic
.Name
;
2385 /* I don't think we should get frag shader input without
2386 * semantic info? Otherwise how do inputs get linked to
2389 compile_assert(ctx
, (ctx
->type
== TGSI_PROCESSOR_VERTEX
) ||
2390 decl
->Declaration
.Semantic
);
2392 for (i
= decl
->Range
.First
; i
<= decl
->Range
.Last
; i
++) {
2393 unsigned n
= so
->inputs_count
++;
2394 unsigned r
= regid(i
, 0);
2397 /* we'll figure out the actual components used after scheduling */
2400 DBG("decl in -> r%d", i
);
2402 compile_assert(ctx
, n
< ARRAY_SIZE(so
->inputs
));
2404 so
->inputs
[n
].semantic
= decl_semantic(&decl
->Semantic
);
2405 so
->inputs
[n
].compmask
= (1 << ncomp
) - 1;
2406 so
->inputs
[n
].regid
= r
;
2407 so
->inputs
[n
].inloc
= ctx
->next_inloc
;
2409 for (j
= 0; j
< ncomp
; j
++) {
2410 struct ir3_instruction
*instr
= NULL
;
2412 if (ctx
->type
== TGSI_PROCESSOR_FRAGMENT
) {
2413 /* for fragment shaders, POSITION and FACE are handled
2414 * specially, not using normal varying / bary.f
2416 if (name
== TGSI_SEMANTIC_POSITION
) {
2417 so
->inputs
[n
].bary
= false;
2418 so
->frag_coord
= true;
2419 instr
= decl_in_frag_coord(ctx
, r
+ j
, j
);
2420 } else if (name
== TGSI_SEMANTIC_FACE
) {
2421 so
->inputs
[n
].bary
= false;
2422 so
->frag_face
= true;
2423 instr
= decl_in_frag_face(ctx
, r
+ j
, j
);
2425 so
->inputs
[n
].bary
= true;
2426 instr
= decl_in_frag_bary(ctx
, r
+ j
, j
,
2427 so
->inputs
[n
].inloc
+ j
- 8);
2430 instr
= create_input(ctx
->block
, NULL
, (i
* 4) + j
);
2433 ctx
->block
->inputs
[(i
* 4) + j
] = instr
;
2436 if (so
->inputs
[n
].bary
|| (ctx
->type
== TGSI_PROCESSOR_VERTEX
)) {
2437 ctx
->next_inloc
+= ncomp
;
2438 so
->total_in
+= ncomp
;
2444 decl_out(struct ir3_compile_context
*ctx
, struct tgsi_full_declaration
*decl
)
2446 struct ir3_shader_variant
*so
= ctx
->so
;
2448 unsigned name
= decl
->Semantic
.Name
;
2451 compile_assert(ctx
, decl
->Declaration
.Semantic
);
2453 DBG("decl out[%d] -> r%d", name
, decl
->Range
.First
);
2455 if (ctx
->type
== TGSI_PROCESSOR_VERTEX
) {
2457 case TGSI_SEMANTIC_POSITION
:
2458 so
->writes_pos
= true;
2460 case TGSI_SEMANTIC_PSIZE
:
2461 so
->writes_psize
= true;
2463 case TGSI_SEMANTIC_COLOR
:
2464 case TGSI_SEMANTIC_BCOLOR
:
2465 case TGSI_SEMANTIC_GENERIC
:
2466 case TGSI_SEMANTIC_FOG
:
2467 case TGSI_SEMANTIC_TEXCOORD
:
2470 compile_error(ctx
, "unknown VS semantic name: %s\n",
2471 tgsi_semantic_names
[name
]);
2475 case TGSI_SEMANTIC_POSITION
:
2476 comp
= 2; /* tgsi will write to .z component */
2477 so
->writes_pos
= true;
2479 case TGSI_SEMANTIC_COLOR
:
2482 compile_error(ctx
, "unknown FS semantic name: %s\n",
2483 tgsi_semantic_names
[name
]);
2487 for (i
= decl
->Range
.First
; i
<= decl
->Range
.Last
; i
++) {
2488 unsigned n
= so
->outputs_count
++;
2493 compile_assert(ctx
, n
< ARRAY_SIZE(so
->outputs
));
2495 so
->outputs
[n
].semantic
= decl_semantic(&decl
->Semantic
);
2496 so
->outputs
[n
].regid
= regid(i
, comp
);
2498 /* avoid undefined outputs, stick a dummy mov from imm{0.0},
2499 * which if the output is actually assigned will be over-
2502 for (j
= 0; j
< ncomp
; j
++)
2503 ctx
->block
->outputs
[(i
* 4) + j
] = create_immed(ctx
, 0.0);
2507 /* from TGSI perspective, we actually have inputs. But most of the "inputs"
2508 * for a fragment shader are just bary.f instructions. The *actual* inputs
2509 * from the hw perspective are the frag_pos and optionally frag_coord and
2513 fixup_frag_inputs(struct ir3_compile_context
*ctx
)
2515 struct ir3_shader_variant
*so
= ctx
->so
;
2516 struct ir3_block
*block
= ctx
->block
;
2517 struct ir3_instruction
**inputs
;
2518 struct ir3_instruction
*instr
;
2523 n
= 4; /* always have frag_pos */
2524 n
+= COND(so
->frag_face
, 4);
2525 n
+= COND(so
->frag_coord
, 4);
2527 inputs
= ir3_alloc(ctx
->ir
, n
* (sizeof(struct ir3_instruction
*)));
2529 if (so
->frag_face
) {
2530 /* this ultimately gets assigned to hr0.x so doesn't conflict
2531 * with frag_coord/frag_pos..
2533 inputs
[block
->ninputs
++] = ctx
->frag_face
;
2534 ctx
->frag_face
->regs
[0]->num
= 0;
2536 /* remaining channels not used, but let's avoid confusing
2537 * other parts that expect inputs to come in groups of vec4
2539 inputs
[block
->ninputs
++] = NULL
;
2540 inputs
[block
->ninputs
++] = NULL
;
2541 inputs
[block
->ninputs
++] = NULL
;
2544 /* since we don't know where to set the regid for frag_coord,
2545 * we have to use r0.x for it. But we don't want to *always*
2546 * use r1.x for frag_pos as that could increase the register
2547 * footprint on simple shaders:
2549 if (so
->frag_coord
) {
2550 ctx
->frag_coord
[0]->regs
[0]->num
= regid
++;
2551 ctx
->frag_coord
[1]->regs
[0]->num
= regid
++;
2552 ctx
->frag_coord
[2]->regs
[0]->num
= regid
++;
2553 ctx
->frag_coord
[3]->regs
[0]->num
= regid
++;
2555 inputs
[block
->ninputs
++] = ctx
->frag_coord
[0];
2556 inputs
[block
->ninputs
++] = ctx
->frag_coord
[1];
2557 inputs
[block
->ninputs
++] = ctx
->frag_coord
[2];
2558 inputs
[block
->ninputs
++] = ctx
->frag_coord
[3];
2561 /* we always have frag_pos: */
2562 so
->pos_regid
= regid
;
2565 instr
= create_input(block
, NULL
, block
->ninputs
);
2566 instr
->regs
[0]->num
= regid
++;
2567 inputs
[block
->ninputs
++] = instr
;
2568 ctx
->frag_pos
->regs
[1]->instr
= instr
;
2571 instr
= create_input(block
, NULL
, block
->ninputs
);
2572 instr
->regs
[0]->num
= regid
++;
2573 inputs
[block
->ninputs
++] = instr
;
2574 ctx
->frag_pos
->regs
[2]->instr
= instr
;
2576 block
->inputs
= inputs
;
2580 compile_instructions(struct ir3_compile_context
*ctx
)
2584 /* for fragment shader, we have a single input register (usually
2585 * r0.xy) which is used as the base for bary.f varying fetch instrs:
2587 if (ctx
->type
== TGSI_PROCESSOR_FRAGMENT
) {
2588 struct ir3_instruction
*instr
;
2589 instr
= ir3_instr_create(ctx
->block
, -1, OPC_META_FI
);
2590 ir3_reg_create(instr
, 0, 0);
2591 ir3_reg_create(instr
, 0, IR3_REG_SSA
); /* r0.x */
2592 ir3_reg_create(instr
, 0, IR3_REG_SSA
); /* r0.y */
2593 ctx
->frag_pos
= instr
;
2596 while (!tgsi_parse_end_of_tokens(&ctx
->parser
)) {
2597 tgsi_parse_token(&ctx
->parser
);
2599 switch (ctx
->parser
.FullToken
.Token
.Type
) {
2600 case TGSI_TOKEN_TYPE_DECLARATION
: {
2601 struct tgsi_full_declaration
*decl
=
2602 &ctx
->parser
.FullToken
.FullDeclaration
;
2603 if (decl
->Declaration
.File
== TGSI_FILE_OUTPUT
) {
2604 decl_out(ctx
, decl
);
2605 } else if (decl
->Declaration
.File
== TGSI_FILE_INPUT
) {
2610 case TGSI_TOKEN_TYPE_IMMEDIATE
: {
2611 /* TODO: if we know the immediate is small enough, and only
2612 * used with instructions that can embed an immediate, we
2615 struct tgsi_full_immediate
*imm
=
2616 &ctx
->parser
.FullToken
.FullImmediate
;
2617 unsigned n
= ctx
->so
->immediates_count
++;
2618 compile_assert(ctx
, n
< ARRAY_SIZE(ctx
->so
->immediates
));
2619 memcpy(ctx
->so
->immediates
[n
].val
, imm
->u
, 16);
2622 case TGSI_TOKEN_TYPE_INSTRUCTION
: {
2623 struct tgsi_full_instruction
*inst
=
2624 &ctx
->parser
.FullToken
.FullInstruction
;
2625 unsigned opc
= inst
->Instruction
.Opcode
;
2626 const struct instr_translater
*t
= &translaters
[opc
];
2629 t
->fxn(t
, ctx
, inst
);
2630 ctx
->num_internal_temps
= 0;
2632 compile_assert(ctx
, !ctx
->using_tmp_dst
);
2634 compile_error(ctx
, "unknown TGSI opc: %s\n",
2635 tgsi_get_opcode_name(opc
));
2638 switch (inst
->Instruction
.Saturate
) {
2639 case TGSI_SAT_ZERO_ONE
:
2640 create_clamp_imm(ctx
, &inst
->Dst
[0].Register
,
2641 fui(0.0), fui(1.0));
2643 case TGSI_SAT_MINUS_PLUS_ONE
:
2644 create_clamp_imm(ctx
, &inst
->Dst
[0].Register
,
2645 fui(-1.0), fui(1.0));
2660 compile_dump(struct ir3_compile_context
*ctx
)
2662 const char *name
= (ctx
->so
->type
== SHADER_VERTEX
) ? "vert" : "frag";
2663 static unsigned n
= 0;
2666 snprintf(fname
, sizeof(fname
), "%s-%04u.dot", name
, n
++);
2667 f
= fopen(fname
, "w");
2670 ir3_block_depth(ctx
->block
);
2671 ir3_dump(ctx
->ir
, name
, ctx
->block
, f
);
2676 ir3_compile_shader(struct ir3_shader_variant
*so
,
2677 const struct tgsi_token
*tokens
, struct ir3_shader_key key
,
2680 struct ir3_compile_context ctx
;
2681 struct ir3_block
*block
;
2682 struct ir3_instruction
**inputs
;
2683 unsigned i
, j
, actual_in
;
2688 so
->ir
= ir3_create();
2692 if (compile_init(&ctx
, so
, tokens
) != TGSI_PARSE_OK
) {
2693 DBG("INIT failed!");
2698 compile_instructions(&ctx
);
2702 /* keep track of the inputs from TGSI perspective.. */
2703 inputs
= block
->inputs
;
2705 /* but fixup actual inputs for frag shader: */
2706 if (ctx
.type
== TGSI_PROCESSOR_FRAGMENT
)
2707 fixup_frag_inputs(&ctx
);
2709 /* at this point, for binning pass, throw away unneeded outputs: */
2710 if (key
.binning_pass
) {
2711 for (i
= 0, j
= 0; i
< so
->outputs_count
; i
++) {
2712 unsigned name
= sem2name(so
->outputs
[i
].semantic
);
2713 unsigned idx
= sem2name(so
->outputs
[i
].semantic
);
2715 /* throw away everything but first position/psize */
2716 if ((idx
== 0) && ((name
== TGSI_SEMANTIC_POSITION
) ||
2717 (name
== TGSI_SEMANTIC_PSIZE
))) {
2719 so
->outputs
[j
] = so
->outputs
[i
];
2720 block
->outputs
[(j
*4)+0] = block
->outputs
[(i
*4)+0];
2721 block
->outputs
[(j
*4)+1] = block
->outputs
[(i
*4)+1];
2722 block
->outputs
[(j
*4)+2] = block
->outputs
[(i
*4)+2];
2723 block
->outputs
[(j
*4)+3] = block
->outputs
[(i
*4)+3];
2728 so
->outputs_count
= j
;
2729 block
->noutputs
= j
* 4;
2732 /* for rendering to alpha format, we only need the .w component,
2733 * and we need it to be in the .x position:
2736 for (i
= 0, j
= 0; i
< so
->outputs_count
; i
++) {
2737 unsigned name
= sem2name(so
->outputs
[i
].semantic
);
2739 /* move .w component to .x and discard others: */
2740 if (name
== TGSI_SEMANTIC_COLOR
) {
2741 block
->outputs
[(i
*4)+0] = block
->outputs
[(i
*4)+3];
2742 block
->outputs
[(i
*4)+1] = NULL
;
2743 block
->outputs
[(i
*4)+2] = NULL
;
2744 block
->outputs
[(i
*4)+3] = NULL
;
2749 /* at this point, we want the kill's in the outputs array too,
2750 * so that they get scheduled (since they have no dst).. we've
2751 * already ensured that the array is big enough in push_block():
2753 if (ctx
.type
== TGSI_PROCESSOR_FRAGMENT
) {
2754 for (i
= 0; i
< ctx
.kill_count
; i
++)
2755 block
->outputs
[block
->noutputs
++] = ctx
.kill
[i
];
2758 if (fd_mesa_debug
& FD_DBG_OPTDUMP
)
2761 ret
= ir3_block_flatten(block
);
2763 DBG("FLATTEN failed!");
2766 if ((ret
> 0) && (fd_mesa_debug
& FD_DBG_OPTDUMP
))
2769 if (fd_mesa_debug
& FD_DBG_OPTMSGS
) {
2770 printf("BEFORE CP:\n");
2771 ir3_dump_instr_list(block
->head
);
2775 ir3_block_cp(block
);
2777 if (fd_mesa_debug
& FD_DBG_OPTDUMP
)
2780 ir3_block_depth(block
);
2782 if (fd_mesa_debug
& FD_DBG_OPTMSGS
) {
2783 printf("AFTER DEPTH:\n");
2784 ir3_dump_instr_list(block
->head
);
2787 ret
= ir3_block_sched(block
);
2789 DBG("SCHED failed!");
2793 if (fd_mesa_debug
& FD_DBG_OPTMSGS
) {
2794 printf("AFTER SCHED:\n");
2795 ir3_dump_instr_list(block
->head
);
2798 ret
= ir3_block_ra(block
, so
->type
, key
.half_precision
,
2799 so
->frag_coord
, so
->frag_face
, &so
->has_samp
);
2805 if (fd_mesa_debug
& FD_DBG_OPTMSGS
) {
2806 printf("AFTER RA:\n");
2807 ir3_dump_instr_list(block
->head
);
2810 /* fixup input/outputs: */
2811 for (i
= 0; i
< so
->outputs_count
; i
++) {
2812 so
->outputs
[i
].regid
= block
->outputs
[i
*4]->regs
[0]->num
;
2813 /* preserve hack for depth output.. tgsi writes depth to .z,
2814 * but what we give the hw is the scalar register:
2816 if ((ctx
.type
== TGSI_PROCESSOR_FRAGMENT
) &&
2817 (sem2name(so
->outputs
[i
].semantic
) == TGSI_SEMANTIC_POSITION
))
2818 so
->outputs
[i
].regid
+= 2;
2820 /* Note that some or all channels of an input may be unused: */
2822 for (i
= 0; i
< so
->inputs_count
; i
++) {
2823 unsigned j
, regid
= ~0, compmask
= 0;
2824 so
->inputs
[i
].ncomp
= 0;
2825 for (j
= 0; j
< 4; j
++) {
2826 struct ir3_instruction
*in
= inputs
[(i
*4) + j
];
2828 compmask
|= (1 << j
);
2829 regid
= in
->regs
[0]->num
- j
;
2831 so
->inputs
[i
].ncomp
++;
2834 so
->inputs
[i
].regid
= regid
;
2835 so
->inputs
[i
].compmask
= compmask
;
2838 /* fragment shader always gets full vec4's even if it doesn't
2839 * fetch all components, but vertex shader we need to update
2840 * with the actual number of components fetch, otherwise thing
2841 * will hang due to mismaptch between VFD_DECODE's and
2844 if (so
->type
== SHADER_VERTEX
)
2845 so
->total_in
= actual_in
;
2849 ir3_destroy(so
->ir
);