/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
31 #include "pipe/p_state.h"
32 #include "util/u_string.h"
33 #include "util/u_memory.h"
34 #include "util/u_inlines.h"
35 #include "tgsi/tgsi_lowering.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_ureg.h"
38 #include "tgsi/tgsi_info.h"
39 #include "tgsi/tgsi_strings.h"
40 #include "tgsi/tgsi_dump.h"
41 #include "tgsi/tgsi_scan.h"
43 #include "freedreno_util.h"
45 #include "ir3_compiler.h"
46 #include "ir3_shader.h"
48 #include "instr-a3xx.h"
52 struct ir3_compile_context
{
53 const struct tgsi_token
*tokens
;
56 struct ir3_block
*block
;
57 struct ir3_shader_variant
*so
;
59 struct tgsi_parse_context parser
;
62 struct tgsi_shader_info info
;
64 /* last input dst (for setting (ei) flag): */
65 struct ir3_register
*last_input
;
67 /* last instruction with relative addressing: */
68 struct ir3_instruction
*last_rel
;
70 /* for calculating input/output positions/linkages: */
73 unsigned num_internal_temps
;
74 struct tgsi_src_register internal_temps
[6];
76 /* track registers which need to synchronize w/ "complex alu" cat3
77 * instruction pipeline:
81 /* track registers which need to synchronize with texture fetch
86 /* inputs start at r0, temporaries start after last input, and
87 * outputs start after last temporary.
89 * We could be more clever, because this is not a hw restriction,
90 * but probably best just to implement an optimizing pass to
91 * reduce the # of registers used and get rid of redundant mov's
92 * (to output register).
94 unsigned base_reg
[TGSI_FILE_COUNT
];
96 /* idx/slot for last compiler generated immediate */
97 unsigned immediate_idx
;
99 /* stack of branch instructions that start (potentially nested)
100 * branch instructions, so that we can fix up the branch targets
101 * so that we can fix up the branch target on the corresponding
104 struct ir3_instruction
*branch
[16];
105 unsigned int branch_count
;
107 /* used when dst is same as one of the src, to avoid overwriting a
108 * src element before the remaining scalar instructions that make
109 * up the vector operation
111 struct tgsi_dst_register tmp_dst
;
112 struct tgsi_src_register
*tmp_src
;
/* Forward declarations for helpers used before their definitions. */
static void vectorize(struct ir3_compile_context *ctx,
		struct ir3_instruction *instr, struct tgsi_dst_register *dst,
		int nsrcs, ...);
static void create_mov(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *dst, struct tgsi_src_register *src);
123 compile_init(struct ir3_compile_context
*ctx
, struct ir3_shader_variant
*so
,
124 const struct tgsi_token
*tokens
)
126 unsigned ret
, base
= 0;
127 struct tgsi_shader_info
*info
= &ctx
->info
;
128 struct tgsi_lowering_config lconfig
= {
129 .color_two_side
= so
->key
.color_two_side
,
147 case SHADER_FRAGMENT
:
149 lconfig
.saturate_s
= so
->key
.fsaturate_s
;
150 lconfig
.saturate_t
= so
->key
.fsaturate_t
;
151 lconfig
.saturate_r
= so
->key
.fsaturate_r
;
154 lconfig
.saturate_s
= so
->key
.vsaturate_s
;
155 lconfig
.saturate_t
= so
->key
.vsaturate_t
;
156 lconfig
.saturate_r
= so
->key
.vsaturate_r
;
160 ctx
->tokens
= tgsi_transform_lowering(&lconfig
, tokens
, &ctx
->info
);
161 ctx
->free_tokens
= !!ctx
->tokens
;
164 ctx
->tokens
= tokens
;
167 ctx
->block
= ir3_block_create(ctx
->ir
, 0, 0, 0);
169 ctx
->last_input
= NULL
;
170 ctx
->last_rel
= NULL
;
172 ctx
->num_internal_temps
= 0;
173 ctx
->branch_count
= 0;
175 regmask_init(&ctx
->needs_ss
);
176 regmask_init(&ctx
->needs_sy
);
177 memset(ctx
->base_reg
, 0, sizeof(ctx
->base_reg
));
179 /* Immediates go after constants: */
180 ctx
->base_reg
[TGSI_FILE_CONSTANT
] = 0;
181 ctx
->base_reg
[TGSI_FILE_IMMEDIATE
] =
182 info
->file_max
[TGSI_FILE_CONSTANT
] + 1;
184 /* if full precision and fragment shader, don't clobber
185 * r0.x w/ bary fetch:
187 if ((so
->type
== SHADER_FRAGMENT
) && !so
->key
.half_precision
)
190 /* Temporaries after outputs after inputs: */
191 ctx
->base_reg
[TGSI_FILE_INPUT
] = base
;
192 ctx
->base_reg
[TGSI_FILE_OUTPUT
] = base
+
193 info
->file_max
[TGSI_FILE_INPUT
] + 1;
194 ctx
->base_reg
[TGSI_FILE_TEMPORARY
] = base
+
195 info
->file_max
[TGSI_FILE_INPUT
] + 1 +
196 info
->file_max
[TGSI_FILE_OUTPUT
] + 1;
198 so
->first_driver_param
= ~0;
199 so
->first_immediate
= ctx
->base_reg
[TGSI_FILE_IMMEDIATE
];
200 ctx
->immediate_idx
= 4 * (ctx
->info
.file_max
[TGSI_FILE_IMMEDIATE
] + 1);
202 ret
= tgsi_parse_init(&ctx
->parser
, ctx
->tokens
);
203 if (ret
!= TGSI_PARSE_OK
)
206 ctx
->type
= ctx
->parser
.FullHeader
.Processor
.Processor
;
212 compile_error(struct ir3_compile_context
*ctx
, const char *format
, ...)
215 va_start(ap
, format
);
216 _debug_vprintf(format
, ap
);
218 tgsi_dump(ctx
->tokens
, 0);
/* Assert helper which routes failures through compile_error() so the
 * offending TGSI gets dumped.  do/while(0) so it behaves as a statement.
 */
#define compile_assert(ctx, cond) do { \
		if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
	} while (0)
227 compile_free(struct ir3_compile_context
*ctx
)
229 if (ctx
->free_tokens
)
230 free((void *)ctx
->tokens
);
231 tgsi_parse_free(&ctx
->parser
);
234 struct instr_translater
{
235 void (*fxn
)(const struct instr_translater
*t
,
236 struct ir3_compile_context
*ctx
,
237 struct tgsi_full_instruction
*inst
);
240 opc_t hopc
; /* opc to use for half_precision mode, if different */
245 handle_last_rel(struct ir3_compile_context
*ctx
)
248 ctx
->last_rel
->flags
|= IR3_INSTR_UL
;
249 ctx
->last_rel
= NULL
;
253 static struct ir3_instruction
*
254 instr_create(struct ir3_compile_context
*ctx
, int category
, opc_t opc
)
256 return ir3_instr_create(ctx
->block
, category
, opc
);
260 add_nop(struct ir3_compile_context
*ctx
, unsigned count
)
263 instr_create(ctx
, 0, OPC_NOP
);
267 src_flags(struct ir3_compile_context
*ctx
, struct ir3_register
*reg
)
271 if (reg
->flags
& (IR3_REG_CONST
| IR3_REG_IMMED
))
274 if (regmask_get(&ctx
->needs_ss
, reg
)) {
275 flags
|= IR3_INSTR_SS
;
276 regmask_init(&ctx
->needs_ss
);
279 if (regmask_get(&ctx
->needs_sy
, reg
)) {
280 flags
|= IR3_INSTR_SY
;
281 regmask_init(&ctx
->needs_sy
);
287 static struct ir3_register
*
288 add_dst_reg(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
,
289 const struct tgsi_dst_register
*dst
, unsigned chan
)
291 unsigned flags
= 0, num
= 0;
292 struct ir3_register
*reg
;
295 case TGSI_FILE_OUTPUT
:
296 case TGSI_FILE_TEMPORARY
:
297 num
= dst
->Index
+ ctx
->base_reg
[dst
->File
];
299 case TGSI_FILE_ADDRESS
:
303 compile_error(ctx
, "unsupported dst register file: %s\n",
304 tgsi_file_name(dst
->File
));
309 flags
|= IR3_REG_RELATIV
;
310 if (ctx
->so
->key
.half_precision
)
311 flags
|= IR3_REG_HALF
;
313 reg
= ir3_reg_create(instr
, regid(num
, chan
), flags
);
316 ctx
->last_rel
= instr
;
321 static struct ir3_register
*
322 add_src_reg(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
,
323 const struct tgsi_src_register
*src
, unsigned chan
)
325 unsigned flags
= 0, num
= 0;
326 struct ir3_register
*reg
;
328 /* TODO we need to use a mov to temp for const >= 64.. or maybe
329 * we could use relative addressing..
331 compile_assert(ctx
, src
->Index
< 64);
334 case TGSI_FILE_IMMEDIATE
:
335 /* TODO if possible, use actual immediate instead of const.. but
336 * TGSI has vec4 immediates, we can only embed scalar (of limited
337 * size, depending on instruction..)
339 case TGSI_FILE_CONSTANT
:
340 flags
|= IR3_REG_CONST
;
341 num
= src
->Index
+ ctx
->base_reg
[src
->File
];
343 case TGSI_FILE_OUTPUT
:
344 /* NOTE: we should only end up w/ OUTPUT file for things like
345 * clamp()'ing saturated dst instructions
347 case TGSI_FILE_INPUT
:
348 case TGSI_FILE_TEMPORARY
:
349 num
= src
->Index
+ ctx
->base_reg
[src
->File
];
352 compile_error(ctx
, "unsupported src register file: %s\n",
353 tgsi_file_name(src
->File
));
358 flags
|= IR3_REG_ABS
;
360 flags
|= IR3_REG_NEGATE
;
362 flags
|= IR3_REG_RELATIV
;
363 if (ctx
->so
->key
.half_precision
)
364 flags
|= IR3_REG_HALF
;
366 reg
= ir3_reg_create(instr
, regid(num
, chan
), flags
);
369 ctx
->last_rel
= instr
;
371 instr
->flags
|= src_flags(ctx
, reg
);
377 src_from_dst(struct tgsi_src_register
*src
, struct tgsi_dst_register
*dst
)
379 src
->File
= dst
->File
;
380 src
->Indirect
= dst
->Indirect
;
381 src
->Dimension
= dst
->Dimension
;
382 src
->Index
= dst
->Index
;
385 src
->SwizzleX
= TGSI_SWIZZLE_X
;
386 src
->SwizzleY
= TGSI_SWIZZLE_Y
;
387 src
->SwizzleZ
= TGSI_SWIZZLE_Z
;
388 src
->SwizzleW
= TGSI_SWIZZLE_W
;
391 /* Get internal-temp src/dst to use for a sequence of instructions
392 * generated by a single TGSI op.
394 static struct tgsi_src_register
*
395 get_internal_temp(struct ir3_compile_context
*ctx
,
396 struct tgsi_dst_register
*tmp_dst
)
398 struct tgsi_src_register
*tmp_src
;
401 tmp_dst
->File
= TGSI_FILE_TEMPORARY
;
402 tmp_dst
->WriteMask
= TGSI_WRITEMASK_XYZW
;
403 tmp_dst
->Indirect
= 0;
404 tmp_dst
->Dimension
= 0;
406 /* assign next temporary: */
407 n
= ctx
->num_internal_temps
++;
408 compile_assert(ctx
, n
< ARRAY_SIZE(ctx
->internal_temps
));
409 tmp_src
= &ctx
->internal_temps
[n
];
411 tmp_dst
->Index
= ctx
->info
.file_max
[TGSI_FILE_TEMPORARY
] + n
+ 1;
413 src_from_dst(tmp_src
, tmp_dst
);
418 /* Get internal half-precision temp src/dst to use for a sequence of
419 * instructions generated by a single TGSI op.
421 static struct tgsi_src_register
*
422 get_internal_temp_hr(struct ir3_compile_context
*ctx
,
423 struct tgsi_dst_register
*tmp_dst
)
425 struct tgsi_src_register
*tmp_src
;
428 if (ctx
->so
->key
.half_precision
)
429 return get_internal_temp(ctx
, tmp_dst
);
431 tmp_dst
->File
= TGSI_FILE_TEMPORARY
;
432 tmp_dst
->WriteMask
= TGSI_WRITEMASK_XYZW
;
433 tmp_dst
->Indirect
= 0;
434 tmp_dst
->Dimension
= 0;
436 /* assign next temporary: */
437 n
= ctx
->num_internal_temps
++;
438 compile_assert(ctx
, n
< ARRAY_SIZE(ctx
->internal_temps
));
439 tmp_src
= &ctx
->internal_temps
[n
];
441 /* just use hr0 because no one else should be using half-
446 src_from_dst(tmp_src
, tmp_dst
);
452 is_const(struct tgsi_src_register
*src
)
454 return (src
->File
== TGSI_FILE_CONSTANT
) ||
455 (src
->File
== TGSI_FILE_IMMEDIATE
);
459 is_relative(struct tgsi_src_register
*src
)
461 return src
->Indirect
;
/* True if the src is either relative-addressed or const/immediate. */
static bool
is_rel_or_const(struct tgsi_src_register *src)
{
	return is_relative(src) || is_const(src);
}
471 get_ftype(struct ir3_compile_context
*ctx
)
473 return ctx
->so
->key
.half_precision
? TYPE_F16
: TYPE_F32
;
477 get_utype(struct ir3_compile_context
*ctx
)
479 return ctx
->so
->key
.half_precision
? TYPE_U16
: TYPE_U32
;
483 src_swiz(struct tgsi_src_register
*src
, int chan
)
486 case 0: return src
->SwizzleX
;
487 case 1: return src
->SwizzleY
;
488 case 2: return src
->SwizzleZ
;
489 case 3: return src
->SwizzleW
;
495 /* for instructions that cannot take a const register as src, if needed
496 * generate a move to temporary gpr:
498 static struct tgsi_src_register
*
499 get_unconst(struct ir3_compile_context
*ctx
, struct tgsi_src_register
*src
)
501 struct tgsi_dst_register tmp_dst
;
502 struct tgsi_src_register
*tmp_src
;
504 compile_assert(ctx
, is_rel_or_const(src
));
506 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
508 create_mov(ctx
, &tmp_dst
, src
);
514 get_immediate(struct ir3_compile_context
*ctx
,
515 struct tgsi_src_register
*reg
, uint32_t val
)
517 unsigned neg
, swiz
, idx
, i
;
518 /* actually maps 1:1 currently.. not sure if that is safe to rely on: */
519 static const unsigned swiz2tgsi
[] = {
520 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_W
,
523 for (i
= 0; i
< ctx
->immediate_idx
; i
++) {
527 if (ctx
->so
->immediates
[idx
].val
[swiz
] == val
) {
532 if (ctx
->so
->immediates
[idx
].val
[swiz
] == -val
) {
538 if (i
== ctx
->immediate_idx
) {
539 /* need to generate a new immediate: */
543 ctx
->so
->immediates
[idx
].val
[swiz
] = val
;
544 ctx
->so
->immediates_count
= idx
+ 1;
545 ctx
->immediate_idx
++;
548 reg
->File
= TGSI_FILE_IMMEDIATE
;
554 reg
->SwizzleX
= swiz2tgsi
[swiz
];
555 reg
->SwizzleY
= swiz2tgsi
[swiz
];
556 reg
->SwizzleZ
= swiz2tgsi
[swiz
];
557 reg
->SwizzleW
= swiz2tgsi
[swiz
];
561 create_mov(struct ir3_compile_context
*ctx
, struct tgsi_dst_register
*dst
,
562 struct tgsi_src_register
*src
)
564 type_t type_mov
= get_ftype(ctx
);
567 for (i
= 0; i
< 4; i
++) {
568 /* move to destination: */
569 if (dst
->WriteMask
& (1 << i
)) {
570 struct ir3_instruction
*instr
;
572 if (src
->Absolute
|| src
->Negate
) {
573 /* can't have abs or neg on a mov instr, so use
574 * absneg.f instead to handle these cases:
576 instr
= instr_create(ctx
, 2, OPC_ABSNEG_F
);
578 instr
= instr_create(ctx
, 1, 0);
579 instr
->cat1
.src_type
= type_mov
;
580 instr
->cat1
.dst_type
= type_mov
;
583 add_dst_reg(ctx
, instr
, dst
, i
);
584 add_src_reg(ctx
, instr
, src
, src_swiz(src
, i
));
592 create_clamp(struct ir3_compile_context
*ctx
,
593 struct tgsi_dst_register
*dst
, struct tgsi_src_register
*val
,
594 struct tgsi_src_register
*minval
, struct tgsi_src_register
*maxval
)
596 struct ir3_instruction
*instr
;
598 instr
= instr_create(ctx
, 2, OPC_MAX_F
);
599 vectorize(ctx
, instr
, dst
, 2, val
, 0, minval
, 0);
601 instr
= instr_create(ctx
, 2, OPC_MIN_F
);
602 vectorize(ctx
, instr
, dst
, 2, val
, 0, maxval
, 0);
606 create_clamp_imm(struct ir3_compile_context
*ctx
,
607 struct tgsi_dst_register
*dst
,
608 uint32_t minval
, uint32_t maxval
)
610 struct tgsi_src_register minconst
, maxconst
;
611 struct tgsi_src_register src
;
613 src_from_dst(&src
, dst
);
615 get_immediate(ctx
, &minconst
, minval
);
616 get_immediate(ctx
, &maxconst
, maxval
);
618 create_clamp(ctx
, dst
, &src
, &minconst
, &maxconst
);
621 static struct tgsi_dst_register
*
622 get_dst(struct ir3_compile_context
*ctx
, struct tgsi_full_instruction
*inst
)
624 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
626 for (i
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
627 struct tgsi_src_register
*src
= &inst
->Src
[i
].Register
;
628 if ((src
->File
== dst
->File
) && (src
->Index
== dst
->Index
)) {
629 if ((dst
->WriteMask
== TGSI_WRITEMASK_XYZW
) &&
630 (src
->SwizzleX
== TGSI_SWIZZLE_X
) &&
631 (src
->SwizzleY
== TGSI_SWIZZLE_Y
) &&
632 (src
->SwizzleZ
== TGSI_SWIZZLE_Z
) &&
633 (src
->SwizzleW
== TGSI_SWIZZLE_W
))
635 ctx
->tmp_src
= get_internal_temp(ctx
, &ctx
->tmp_dst
);
636 ctx
->tmp_dst
.WriteMask
= dst
->WriteMask
;
645 put_dst(struct ir3_compile_context
*ctx
, struct tgsi_full_instruction
*inst
,
646 struct tgsi_dst_register
*dst
)
648 /* if necessary, add mov back into original dst: */
649 if (dst
!= &inst
->Dst
[0].Register
) {
650 create_mov(ctx
, &inst
->Dst
[0].Register
, ctx
->tmp_src
);
654 /* helper to generate the necessary repeat and/or additional instructions
655 * to turn a scalar instruction into a vector operation:
658 vectorize(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
,
659 struct tgsi_dst_register
*dst
, int nsrcs
, ...)
663 bool indirect
= dst
->Indirect
;
665 add_dst_reg(ctx
, instr
, dst
, TGSI_SWIZZLE_X
);
668 for (j
= 0; j
< nsrcs
; j
++) {
669 struct tgsi_src_register
*src
=
670 va_arg(ap
, struct tgsi_src_register
*);
671 unsigned flags
= va_arg(ap
, unsigned);
672 struct ir3_register
*reg
;
673 if (flags
& IR3_REG_IMMED
) {
674 reg
= ir3_reg_create(instr
, 0, IR3_REG_IMMED
);
675 /* this is an ugly cast.. should have put flags first! */
676 reg
->iim_val
= *(int *)&src
;
678 reg
= add_src_reg(ctx
, instr
, src
, TGSI_SWIZZLE_X
);
679 indirect
|= src
->Indirect
;
681 reg
->flags
|= flags
& ~IR3_REG_NEGATE
;
682 if (flags
& IR3_REG_NEGATE
)
683 reg
->flags
^= IR3_REG_NEGATE
;
687 for (i
= 0; i
< 4; i
++) {
688 if (dst
->WriteMask
& (1 << i
)) {
689 struct ir3_instruction
*cur
;
694 cur
= ir3_instr_clone(instr
);
695 cur
->flags
&= ~(IR3_INSTR_SY
| IR3_INSTR_SS
| IR3_INSTR_JP
);
698 /* fix-up dst register component: */
699 cur
->regs
[0]->num
= regid(cur
->regs
[0]->num
>> 2, i
);
701 /* fix-up src register component: */
703 for (j
= 0; j
< nsrcs
; j
++) {
704 struct tgsi_src_register
*src
=
705 va_arg(ap
, struct tgsi_src_register
*);
706 unsigned flags
= va_arg(ap
, unsigned);
707 if (!(flags
& IR3_REG_IMMED
)) {
708 cur
->regs
[j
+1]->num
=
709 regid(cur
->regs
[j
+1]->num
>> 2,
711 cur
->flags
|= src_flags(ctx
, cur
->regs
[j
+1]);
721 /* pad w/ nop's.. at least until we are clever enough to
722 * figure out if we really need to..
728 * Handlers for TGSI instructions which do not have a 1:1 mapping to
729 * native instructions:
733 trans_clamp(const struct instr_translater
*t
,
734 struct ir3_compile_context
*ctx
,
735 struct tgsi_full_instruction
*inst
)
737 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
738 struct tgsi_src_register
*src0
= &inst
->Src
[0].Register
;
739 struct tgsi_src_register
*src1
= &inst
->Src
[1].Register
;
740 struct tgsi_src_register
*src2
= &inst
->Src
[2].Register
;
742 create_clamp(ctx
, dst
, src0
, src1
, src2
);
744 put_dst(ctx
, inst
, dst
);
747 /* ARL(x) = x, but mova from hrN.x to a0.. */
749 trans_arl(const struct instr_translater
*t
,
750 struct ir3_compile_context
*ctx
,
751 struct tgsi_full_instruction
*inst
)
753 struct ir3_instruction
*instr
;
754 struct tgsi_dst_register tmp_dst
;
755 struct tgsi_src_register
*tmp_src
;
756 struct tgsi_dst_register
*dst
= &inst
->Dst
[0].Register
;
757 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
758 unsigned chan
= src
->SwizzleX
;
759 compile_assert(ctx
, dst
->File
== TGSI_FILE_ADDRESS
);
761 handle_last_rel(ctx
);
763 tmp_src
= get_internal_temp_hr(ctx
, &tmp_dst
);
765 /* cov.{f32,f16}s16 Rtmp, Rsrc */
766 instr
= instr_create(ctx
, 1, 0);
767 instr
->cat1
.src_type
= get_ftype(ctx
);
768 instr
->cat1
.dst_type
= TYPE_S16
;
769 add_dst_reg(ctx
, instr
, &tmp_dst
, chan
)->flags
|= IR3_REG_HALF
;
770 add_src_reg(ctx
, instr
, src
, chan
);
774 /* shl.b Rtmp, Rtmp, 2 */
775 instr
= instr_create(ctx
, 2, OPC_SHL_B
);
776 add_dst_reg(ctx
, instr
, &tmp_dst
, chan
)->flags
|= IR3_REG_HALF
;
777 add_src_reg(ctx
, instr
, tmp_src
, chan
)->flags
|= IR3_REG_HALF
;
778 ir3_reg_create(instr
, 0, IR3_REG_IMMED
)->iim_val
= 2;
783 instr
= instr_create(ctx
, 1, 0);
784 instr
->cat1
.src_type
= TYPE_S16
;
785 instr
->cat1
.dst_type
= TYPE_S16
;
786 add_dst_reg(ctx
, instr
, dst
, 0)->flags
|= IR3_REG_HALF
;
787 add_src_reg(ctx
, instr
, tmp_src
, chan
)->flags
|= IR3_REG_HALF
;
789 /* need to ensure 5 instr slots before a0 is used: */
793 /* texture fetch/sample instructions: */
795 trans_samp(const struct instr_translater
*t
,
796 struct ir3_compile_context
*ctx
,
797 struct tgsi_full_instruction
*inst
)
799 struct ir3_register
*r
;
800 struct ir3_instruction
*instr
;
801 struct tgsi_src_register
*coord
= &inst
->Src
[0].Register
;
802 struct tgsi_src_register
*samp
= &inst
->Src
[1].Register
;
803 unsigned tex
= inst
->Texture
.Texture
;
805 unsigned i
, flags
= 0, src_wrmask
;
806 bool needs_mov
= false;
809 case TGSI_OPCODE_TEX
:
810 if (tex
== TGSI_TEXTURE_2D
) {
811 order
= (int8_t[4]){ 0, 1, -1, -1 };
812 src_wrmask
= TGSI_WRITEMASK_XY
;
814 order
= (int8_t[4]){ 0, 1, 2, -1 };
815 src_wrmask
= TGSI_WRITEMASK_XYZ
;
818 case TGSI_OPCODE_TXP
:
819 if (tex
== TGSI_TEXTURE_2D
) {
820 order
= (int8_t[4]){ 0, 1, 3, -1 };
821 src_wrmask
= TGSI_WRITEMASK_XYZ
;
823 order
= (int8_t[4]){ 0, 1, 2, 3 };
824 src_wrmask
= TGSI_WRITEMASK_XYZW
;
826 flags
|= IR3_INSTR_P
;
829 compile_assert(ctx
, 0);
833 if ((tex
== TGSI_TEXTURE_3D
) || (tex
== TGSI_TEXTURE_CUBE
)) {
835 flags
|= IR3_INSTR_3D
;
838 /* cat5 instruction cannot seem to handle const or relative: */
839 if (is_rel_or_const(coord
))
842 /* The texture sample instructions need to coord in successive
843 * registers/components (ie. src.xy but not src.yx). And TXP
844 * needs the .w component in .z for 2D.. so in some cases we
845 * might need to emit some mov instructions to shuffle things
848 for (i
= 1; (i
< 4) && (order
[i
] >= 0) && !needs_mov
; i
++)
849 if (src_swiz(coord
, i
) != (src_swiz(coord
, 0) + order
[i
]))
853 struct tgsi_dst_register tmp_dst
;
854 struct tgsi_src_register
*tmp_src
;
857 type_t type_mov
= get_ftype(ctx
);
859 /* need to move things around: */
860 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
862 for (j
= 0; (j
< 4) && (order
[j
] >= 0); j
++) {
863 instr
= instr_create(ctx
, 1, 0);
864 instr
->cat1
.src_type
= type_mov
;
865 instr
->cat1
.dst_type
= type_mov
;
866 add_dst_reg(ctx
, instr
, &tmp_dst
, j
);
867 add_src_reg(ctx
, instr
, coord
,
868 src_swiz(coord
, order
[j
]));
876 instr
= instr_create(ctx
, 5, t
->opc
);
877 instr
->cat5
.type
= get_ftype(ctx
);
878 instr
->cat5
.samp
= samp
->Index
;
879 instr
->cat5
.tex
= samp
->Index
;
880 instr
->flags
|= flags
;
882 r
= add_dst_reg(ctx
, instr
, &inst
->Dst
[0].Register
, 0);
883 r
->wrmask
= inst
->Dst
[0].Register
.WriteMask
;
885 add_src_reg(ctx
, instr
, coord
, coord
->SwizzleX
)->wrmask
= src_wrmask
;
887 /* after add_src_reg() so we don't set (sy) on sam instr itself! */
888 regmask_set(&ctx
->needs_sy
, r
);
892 * SEQ(a,b) = (a == b) ? 1.0 : 0.0
893 * cmps.f.eq tmp0, b, a
894 * cov.u16f16 dst, tmp0
896 * SNE(a,b) = (a != b) ? 1.0 : 0.0
897 * cmps.f.eq tmp0, b, a
898 * add.s tmp0, tmp0, -1
899 * sel.f16 dst, {0.0}, tmp0, {1.0}
901 * SGE(a,b) = (a >= b) ? 1.0 : 0.0
902 * cmps.f.ge tmp0, a, b
903 * cov.u16f16 dst, tmp0
905 * SLE(a,b) = (a <= b) ? 1.0 : 0.0
906 * cmps.f.ge tmp0, b, a
907 * cov.u16f16 dst, tmp0
909 * SGT(a,b) = (a > b) ? 1.0 : 0.0
910 * cmps.f.ge tmp0, b, a
911 * add.s tmp0, tmp0, -1
912 * sel.f16 dst, {0.0}, tmp0, {1.0}
914 * SLT(a,b) = (a < b) ? 1.0 : 0.0
915 * cmps.f.ge tmp0, a, b
916 * add.s tmp0, tmp0, -1
917 * sel.f16 dst, {0.0}, tmp0, {1.0}
919 * CMP(a,b,c) = (a < 0.0) ? b : c
920 * cmps.f.ge tmp0, a, {0.0}
921 * add.s tmp0, tmp0, -1
922 * sel.f16 dst, c, tmp0, b
925 trans_cmp(const struct instr_translater
*t
,
926 struct ir3_compile_context
*ctx
,
927 struct tgsi_full_instruction
*inst
)
929 struct ir3_instruction
*instr
;
930 struct tgsi_dst_register tmp_dst
;
931 struct tgsi_src_register
*tmp_src
;
932 struct tgsi_src_register constval0
, constval1
;
933 /* final instruction for CMP() uses orig src1 and src2: */
934 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
935 struct tgsi_src_register
*a0
, *a1
;
938 tmp_src
= get_internal_temp(ctx
, &tmp_dst
);
940 switch (t
->tgsi_opc
) {
941 case TGSI_OPCODE_SEQ
:
942 case TGSI_OPCODE_SNE
:
943 a0
= &inst
->Src
[1].Register
; /* b */
944 a1
= &inst
->Src
[0].Register
; /* a */
945 condition
= IR3_COND_EQ
;
947 case TGSI_OPCODE_SGE
:
948 case TGSI_OPCODE_SLT
:
949 a0
= &inst
->Src
[0].Register
; /* a */
950 a1
= &inst
->Src
[1].Register
; /* b */
951 condition
= IR3_COND_GE
;
953 case TGSI_OPCODE_SLE
:
954 case TGSI_OPCODE_SGT
:
955 a0
= &inst
->Src
[1].Register
; /* b */
956 a1
= &inst
->Src
[0].Register
; /* a */
957 condition
= IR3_COND_GE
;
959 case TGSI_OPCODE_CMP
:
960 get_immediate(ctx
, &constval0
, fui(0.0));
961 a0
= &inst
->Src
[0].Register
; /* a */
962 a1
= &constval0
; /* {0.0} */
963 condition
= IR3_COND_GE
;
966 compile_assert(ctx
, 0);
970 if (is_const(a0
) && is_const(a1
))
971 a0
= get_unconst(ctx
, a0
);
973 /* cmps.f.ge tmp, a0, a1 */
974 instr
= instr_create(ctx
, 2, OPC_CMPS_F
);
975 instr
->cat2
.condition
= condition
;
976 vectorize(ctx
, instr
, &tmp_dst
, 2, a0
, 0, a1
, 0);
978 switch (t
->tgsi_opc
) {
979 case TGSI_OPCODE_SEQ
:
980 case TGSI_OPCODE_SGE
:
981 case TGSI_OPCODE_SLE
:
982 /* cov.u16f16 dst, tmp0 */
983 instr
= instr_create(ctx
, 1, 0);
984 instr
->cat1
.src_type
= get_utype(ctx
);
985 instr
->cat1
.dst_type
= get_ftype(ctx
);
986 vectorize(ctx
, instr
, dst
, 1, tmp_src
, 0);
988 case TGSI_OPCODE_SNE
:
989 case TGSI_OPCODE_SGT
:
990 case TGSI_OPCODE_SLT
:
991 case TGSI_OPCODE_CMP
:
992 /* add.s tmp, tmp, -1 */
993 instr
= instr_create(ctx
, 2, OPC_ADD_S
);
994 vectorize(ctx
, instr
, &tmp_dst
, 2, tmp_src
, 0, -1, IR3_REG_IMMED
);
996 if (t
->tgsi_opc
== TGSI_OPCODE_CMP
) {
997 /* sel.{f32,f16} dst, src2, tmp, src1 */
998 instr
= instr_create(ctx
, 3,
999 ctx
->so
->key
.half_precision
? OPC_SEL_F16
: OPC_SEL_F32
);
1000 vectorize(ctx
, instr
, dst
, 3,
1001 &inst
->Src
[2].Register
, 0,
1003 &inst
->Src
[1].Register
, 0);
1005 get_immediate(ctx
, &constval0
, fui(0.0));
1006 get_immediate(ctx
, &constval1
, fui(1.0));
1007 /* sel.{f32,f16} dst, {0.0}, tmp0, {1.0} */
1008 instr
= instr_create(ctx
, 3,
1009 ctx
->so
->key
.half_precision
? OPC_SEL_F16
: OPC_SEL_F32
);
1010 vectorize(ctx
, instr
, dst
, 3,
1011 &constval0
, 0, tmp_src
, 0, &constval1
, 0);
1017 put_dst(ctx
, inst
, dst
);
1021 * Conditional / Flow control
1025 find_instruction(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
)
1028 for (i
= 0; i
< ctx
->ir
->instrs_count
; i
++)
1029 if (ctx
->ir
->instrs
[i
] == instr
)
1035 push_branch(struct ir3_compile_context
*ctx
, struct ir3_instruction
*instr
)
1037 ctx
->branch
[ctx
->branch_count
++] = instr
;
1041 pop_branch(struct ir3_compile_context
*ctx
)
1043 struct ir3_instruction
*instr
;
1045 /* if we were clever enough, we'd patch this up after the fact,
1046 * and set (jp) flag on whatever the next instruction was, rather
1047 * than inserting an extra nop..
1049 instr
= instr_create(ctx
, 0, OPC_NOP
);
1050 instr
->flags
|= IR3_INSTR_JP
;
1052 /* pop the branch instruction from the stack and fix up branch target: */
1053 instr
= ctx
->branch
[--ctx
->branch_count
];
1054 instr
->cat0
.immed
= ctx
->ir
->instrs_count
- find_instruction(ctx
, instr
) - 1;
1057 /* We probably don't really want to translate if/else/endif into branches..
1058 * the blob driver evaluates both legs of the if and then uses the sel
1059 * instruction to pick which sides of the branch to "keep".. but figuring
1060 * that out will take somewhat more compiler smarts. So hopefully branches
1061 * don't kill performance too badly.
1064 trans_if(const struct instr_translater
*t
,
1065 struct ir3_compile_context
*ctx
,
1066 struct tgsi_full_instruction
*inst
)
1068 struct ir3_instruction
*instr
;
1069 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1070 struct tgsi_src_register constval
;
1072 get_immediate(ctx
, &constval
, fui(0.0));
1075 src
= get_unconst(ctx
, src
);
1077 instr
= instr_create(ctx
, 2, OPC_CMPS_F
);
1078 ir3_reg_create(instr
, regid(REG_P0
, 0), 0);
1079 add_src_reg(ctx
, instr
, src
, src
->SwizzleX
);
1080 add_src_reg(ctx
, instr
, &constval
, constval
.SwizzleX
);
1081 instr
->cat2
.condition
= IR3_COND_EQ
;
1083 instr
= instr_create(ctx
, 0, OPC_BR
);
1084 push_branch(ctx
, instr
);
1088 trans_else(const struct instr_translater
*t
,
1089 struct ir3_compile_context
*ctx
,
1090 struct tgsi_full_instruction
*inst
)
1092 struct ir3_instruction
*instr
;
1094 /* for first half of if/else/endif, generate a jump past the else: */
1095 instr
= instr_create(ctx
, 0, OPC_JUMP
);
1098 push_branch(ctx
, instr
);
/* Close the innermost if/else construct. */
static void
trans_endif(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	pop_branch(ctx);
}
1110 * Handlers for TGSI instructions which do have 1:1 mapping to native
1115 instr_cat0(const struct instr_translater
*t
,
1116 struct ir3_compile_context
*ctx
,
1117 struct tgsi_full_instruction
*inst
)
1119 instr_create(ctx
, 0, t
->opc
);
1123 instr_cat1(const struct instr_translater
*t
,
1124 struct ir3_compile_context
*ctx
,
1125 struct tgsi_full_instruction
*inst
)
1127 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1128 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1130 /* mov instructions can't handle a negate on src: */
1132 struct tgsi_src_register constval
;
1133 struct ir3_instruction
*instr
;
1135 /* since right now, we are using uniformly either TYPE_F16 or
1136 * TYPE_F32, and we don't utilize the conversion possibilities
1137 * of mov instructions, we can get away with substituting an
1138 * add.f which can handle negate. Might need to revisit this
1139 * in the future if we start supporting widening/narrowing or
1140 * conversion to/from integer..
1142 instr
= instr_create(ctx
, 2, OPC_ADD_F
);
1143 get_immediate(ctx
, &constval
, fui(0.0));
1144 vectorize(ctx
, instr
, dst
, 2, src
, 0, &constval
, 0);
1146 create_mov(ctx
, dst
, src
);
1147 /* create_mov() generates vector sequence, so no vectorize() */
1149 put_dst(ctx
, inst
, dst
);
1153 instr_cat2(const struct instr_translater
*t
,
1154 struct ir3_compile_context
*ctx
,
1155 struct tgsi_full_instruction
*inst
)
1157 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1158 struct tgsi_src_register
*src0
= &inst
->Src
[0].Register
;
1159 struct tgsi_src_register
*src1
= &inst
->Src
[1].Register
;
1160 struct ir3_instruction
*instr
;
1161 unsigned src0_flags
= 0, src1_flags
= 0;
1163 switch (t
->tgsi_opc
) {
1164 case TGSI_OPCODE_ABS
:
1165 src0_flags
= IR3_REG_ABS
;
1167 case TGSI_OPCODE_SUB
:
1168 src1_flags
= IR3_REG_NEGATE
;
1187 /* these only have one src reg */
1188 instr
= instr_create(ctx
, 2, t
->opc
);
1189 vectorize(ctx
, instr
, dst
, 1, src0
, src0_flags
);
1192 if (is_const(src0
) && is_const(src1
))
1193 src0
= get_unconst(ctx
, src0
);
1195 instr
= instr_create(ctx
, 2, t
->opc
);
1196 vectorize(ctx
, instr
, dst
, 2, src0
, src0_flags
,
1201 put_dst(ctx
, inst
, dst
);
1205 instr_cat3(const struct instr_translater
*t
,
1206 struct ir3_compile_context
*ctx
,
1207 struct tgsi_full_instruction
*inst
)
1209 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1210 struct tgsi_src_register
*src0
= &inst
->Src
[0].Register
;
1211 struct tgsi_src_register
*src1
= &inst
->Src
[1].Register
;
1212 struct ir3_instruction
*instr
;
1214 /* in particular, can't handle const for src1 for cat3..
1215 * for mad, we can swap first two src's if needed:
1217 if (is_rel_or_const(src1
)) {
1218 if (is_mad(t
->opc
) && !is_rel_or_const(src0
)) {
1219 struct tgsi_src_register
*tmp
;
1224 src1
= get_unconst(ctx
, src1
);
1228 instr
= instr_create(ctx
, 3,
1229 ctx
->so
->key
.half_precision
? t
->hopc
: t
->opc
);
1230 vectorize(ctx
, instr
, dst
, 3, src0
, 0, src1
, 0,
1231 &inst
->Src
[2].Register
, 0);
1232 put_dst(ctx
, inst
, dst
);
1236 instr_cat4(const struct instr_translater
*t
,
1237 struct ir3_compile_context
*ctx
,
1238 struct tgsi_full_instruction
*inst
)
1240 struct tgsi_dst_register
*dst
= get_dst(ctx
, inst
);
1241 struct tgsi_src_register
*src
= &inst
->Src
[0].Register
;
1242 struct ir3_instruction
*instr
;
1245 /* seems like blob compiler avoids const as src.. */
1247 src
= get_unconst(ctx
, src
);
1252 /* we need to replicate into each component: */
1253 for (i
= 0, n
= 0; i
< 4; i
++) {
1254 if (dst
->WriteMask
& (1 << i
)) {
1257 instr
= instr_create(ctx
, 4, t
->opc
);
1258 add_dst_reg(ctx
, instr
, dst
, i
);
1259 add_src_reg(ctx
, instr
, src
, src
->SwizzleX
);
1263 regmask_set(&ctx
->needs_ss
, instr
->regs
[0]);
1264 put_dst(ctx
, inst
, dst
);
/* Table mapping TGSI opcodes to their translater function plus the
 * target ir3 opcode(s).  Opcodes not listed leave .fxn == NULL, which
 * compile_instructions() reports as an unknown opcode.
 */
static const struct instr_translater translaters[TGSI_OPCODE_LAST] = {
#define INSTR(n, f, ...) \
	[TGSI_OPCODE_ ## n] = { .fxn = (f), .tgsi_opc = TGSI_OPCODE_ ## n, ##__VA_ARGS__ }

	INSTR(MOV,          instr_cat1),
	INSTR(RCP,          instr_cat4, .opc = OPC_RCP),
	INSTR(RSQ,          instr_cat4, .opc = OPC_RSQ),
	INSTR(SQRT,         instr_cat4, .opc = OPC_SQRT),
	INSTR(MUL,          instr_cat2, .opc = OPC_MUL_F),
	INSTR(ADD,          instr_cat2, .opc = OPC_ADD_F),
	/* SUB reuses ADD_F -- presumably instr_cat2 negates src1 based on
	 * .tgsi_opc; confirm against the cat2 translater: */
	INSTR(SUB,          instr_cat2, .opc = OPC_ADD_F),
	INSTR(MIN,          instr_cat2, .opc = OPC_MIN_F),
	INSTR(MAX,          instr_cat2, .opc = OPC_MAX_F),
	INSTR(MAD,          instr_cat3, .opc = OPC_MAD_F32, .hopc = OPC_MAD_F16),
	INSTR(TRUNC,        instr_cat2, .opc = OPC_TRUNC_F),
	INSTR(CLAMP,        trans_clamp),
	INSTR(FLR,          instr_cat2, .opc = OPC_FLOOR_F),
	INSTR(ROUND,        instr_cat2, .opc = OPC_RNDNE_F),
	INSTR(SSG,          instr_cat2, .opc = OPC_SIGN_F),
	INSTR(ARL,          trans_arl),
	INSTR(EX2,          instr_cat4, .opc = OPC_EXP2),
	INSTR(LG2,          instr_cat4, .opc = OPC_LOG2),
	/* ABS reuses ABSNEG_F -- presumably instr_cat2 sets the (abs)
	 * src modifier; confirm against the cat2 translater: */
	INSTR(ABS,          instr_cat2, .opc = OPC_ABSNEG_F),
	INSTR(COS,          instr_cat4, .opc = OPC_COS),
	INSTR(SIN,          instr_cat4, .opc = OPC_SIN),
	/* texture sampling -- .arg lets trans_samp distinguish TEX/TXP: */
	INSTR(TEX,          trans_samp, .opc = OPC_SAM, .arg = TGSI_OPCODE_TEX),
	INSTR(TXP,          trans_samp, .opc = OPC_SAM, .arg = TGSI_OPCODE_TXP),
	/* comparisons/select all share one translater: */
	INSTR(SGT,          trans_cmp),
	INSTR(SLT,          trans_cmp),
	INSTR(SGE,          trans_cmp),
	INSTR(SLE,          trans_cmp),
	INSTR(SNE,          trans_cmp),
	INSTR(SEQ,          trans_cmp),
	INSTR(CMP,          trans_cmp),
	/* flow control: */
	INSTR(IF,           trans_if),
	INSTR(ELSE,         trans_else),
	INSTR(ENDIF,        trans_endif),
	INSTR(END,          instr_cat0, .opc = OPC_END),
	INSTR(KILL,         instr_cat0, .opc = OPC_KILL),
};
1309 decl_semantic(const struct tgsi_declaration_semantic
*sem
)
1311 return ir3_semantic_name(sem
->Name
, sem
->Index
);
/*
 * Handle a TGSI input declaration: fills in so->inputs[] for each
 * declared input register and, for fragment shaders, emits the bary.f
 * instructions which interpolate each input component.
 *
 * Returns the number of nop cycles the caller must insert before the
 * first instruction that consumes the interpolated inputs (the caller
 * in compile_instructions() does `nop = decl_in(...)`).
 */
static int
decl_in(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned base = ctx->base_reg[TGSI_FILE_INPUT];
	unsigned i, flags = 0;
	int nop = 0;

	/* I don't think we should get frag shader input without
	 * semantic info?  Otherwise how do inputs get linked to
	 * vs outputs?
	 */
	compile_assert(ctx, (ctx->type == TGSI_PROCESSOR_VERTEX) ||
			decl->Declaration.Semantic);

	if (ctx->so->key.half_precision)
		flags |= IR3_REG_HALF;

	for (i = decl->Range.First; i <= decl->Range.Last; i++) {
		unsigned n = so->inputs_count++;
		unsigned r = regid(i + base, 0);
		unsigned ncomp;

		/* TODO use ctx->info.input_usage_mask[decl->Range.n] to figure out ncomp: */
		ncomp = 4;

		DBG("decl in -> r%d", i + base);   // XXX

		compile_assert(ctx, n < ARRAY_SIZE(so->inputs));

		so->inputs[n].semantic = decl_semantic(&decl->Semantic);
		so->inputs[n].compmask = (1 << ncomp) - 1;
		so->inputs[n].ncomp = ncomp;
		so->inputs[n].regid = r;
		so->inputs[n].inloc = ctx->next_inloc;
		so->inputs[n].bary = true;   /* all that is supported */
		ctx->next_inloc += ncomp;

		so->total_in += ncomp;

		/* for frag shaders, we need to generate the corresponding bary instr: */
		if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
			unsigned j;

			for (j = 0; j < ncomp; j++) {
				struct ir3_instruction *instr;
				struct ir3_register *dst;

				instr = instr_create(ctx, 2, OPC_BARY_F);

				/* dst register: */
				dst = ir3_reg_create(instr, r + j, flags);
				/* remember the last input fetch so compile_instructions()
				 * can tag it with the (ei) end-of-input flag: */
				ctx->last_input = dst;

				/* input position: */
				ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val =
						so->inputs[n].inloc + j - 8;

				/* input base (always r0.xy): */
				ir3_reg_create(instr, regid(0,0), 0)->wrmask = 0x3;
			}

			/* NOTE(review): reconstructed from elided lines -- the bary
			 * fetch results need a delay before first use; confirm the
			 * count against upstream:
			 */
			nop = 6;
		}
	}

	return nop;
}
/*
 * Handle a TGSI output declaration: records semantic and register id
 * for each declared output in so->outputs[], and notes position /
 * point-size writes on the shader variant for the emit code.
 */
static void
decl_out(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned base = ctx->base_reg[TGSI_FILE_OUTPUT];
	unsigned comp = 0;
	unsigned name = decl->Semantic.Name;
	unsigned i;

	compile_assert(ctx, decl->Declaration.Semantic);  // TODO is this ever not true?

	DBG("decl out[%d] -> r%d", name, decl->Range.First + base);   // XXX

	if (ctx->type == TGSI_PROCESSOR_VERTEX) {
		switch (name) {
		case TGSI_SEMANTIC_POSITION:
			so->writes_pos = true;
			break;
		case TGSI_SEMANTIC_PSIZE:
			so->writes_psize = true;
			break;
		/* no special handling needed for these: */
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_GENERIC:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
			break;
		default:
			compile_error(ctx, "unknown VS semantic name: %s\n",
					tgsi_semantic_names[name]);
		}
	} else {
		switch (name) {
		case TGSI_SEMANTIC_POSITION:
			comp = 2;  /* tgsi will write to .z component */
			so->writes_pos = true;
			break;
		case TGSI_SEMANTIC_COLOR:
			break;
		default:
			compile_error(ctx, "unknown FS semantic name: %s\n",
					tgsi_semantic_names[name]);
		}
	}

	for (i = decl->Range.First; i <= decl->Range.Last; i++) {
		unsigned n = so->outputs_count++;
		compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
		so->outputs[n].semantic = decl_semantic(&decl->Semantic);
		so->outputs[n].regid = regid(i + base, comp);
	}
}
/* Handle a TGSI sampler declaration: just note on the shader variant
 * that samplers are used (consumed later at emit time).
 */
static void
decl_samp(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
{
	ctx->so->has_samp = true;
}
/*
 * Main compile loop: walk the TGSI token stream, dispatching
 * declarations, immediates and instructions, then apply the final
 * shader-level fixups (sync flags on the first instruction, (ei)
 * flag on the last input fetch, pending relative-addressing).
 */
static void
compile_instructions(struct ir3_compile_context *ctx)
{
	struct ir3 *ir = ctx->ir;
	int nop = 0;

	while (!tgsi_parse_end_of_tokens(&ctx->parser)) {
		tgsi_parse_token(&ctx->parser);

		switch (ctx->parser.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_DECLARATION: {
			struct tgsi_full_declaration *decl =
					&ctx->parser.FullToken.FullDeclaration;
			if (decl->Declaration.File == TGSI_FILE_OUTPUT) {
				decl_out(ctx, decl);
			} else if (decl->Declaration.File == TGSI_FILE_INPUT) {
				/* decl_in() returns nop count needed before the first
				 * instruction that consumes the fetched inputs: */
				nop = decl_in(ctx, decl);
			} else if (decl->Declaration.File == TGSI_FILE_SAMPLER) {
				decl_samp(ctx, decl);
			}
			break;
		}
		case TGSI_TOKEN_TYPE_IMMEDIATE: {
			/* TODO: if we know the immediate is small enough, and only
			 * used with instructions that can embed an immediate, we
			 * can skip this:
			 */
			struct tgsi_full_immediate *imm =
					&ctx->parser.FullToken.FullImmediate;
			unsigned n = ctx->so->immediates_count++;
			/* each immediate is a full vec4 (4 x 32b = 16 bytes): */
			memcpy(ctx->so->immediates[n].val, imm->u, 16);
			break;
		}
		case TGSI_TOKEN_TYPE_INSTRUCTION: {
			struct tgsi_full_instruction *inst =
					&ctx->parser.FullToken.FullInstruction;
			unsigned opc = inst->Instruction.Opcode;
			const struct instr_translater *t = &translaters[opc];

			/* NOTE(review): reconstructed from elided lines -- flush
			 * nops owed from input fetch before the first instruction;
			 * confirm against upstream:
			 */
			add_nop(ctx, nop);
			nop = 0;

			if (t->fxn) {
				t->fxn(t, ctx, inst);
				/* internal temps only live for one instruction: */
				ctx->num_internal_temps = 0;
			} else {
				compile_error(ctx, "unknown TGSI opc: %s\n",
						tgsi_get_opcode_name(opc));
			}

			/* apply TGSI saturate modifier by clamping the dst: */
			switch (inst->Instruction.Saturate) {
			case TGSI_SAT_ZERO_ONE:
				create_clamp_imm(ctx, &inst->Dst[0].Register,
						fui(0.0), fui(1.0));
				break;
			case TGSI_SAT_MINUS_PLUS_ONE:
				create_clamp_imm(ctx, &inst->Dst[0].Register,
						fui(-1.0), fui(1.0));
				break;
			}

			break;
		}
		default:
			break;
		}
	}

	/* first instruction must wait on any outstanding loads/fetches: */
	if (ir->instrs_count > 0)
		ir->instrs[0]->flags |= IR3_INSTR_SS | IR3_INSTR_SY;

	/* mark the last input fetch with the (ei) end-of-input flag: */
	if (ctx->last_input)
		ctx->last_input->flags |= IR3_REG_EI;

	handle_last_rel(ctx);
}
1520 ir3_compile_shader_old(struct ir3_shader_variant
*so
,
1521 const struct tgsi_token
*tokens
, struct ir3_shader_key key
)
1523 struct ir3_compile_context ctx
;
1527 so
->ir
= ir3_create();
1531 if (compile_init(&ctx
, so
, tokens
) != TGSI_PARSE_OK
)
1534 compile_instructions(&ctx
);