1a5119c074f7f1ceec3e79bc597fabe0d4fa25af
[mesa.git] / src / gallium / drivers / freedreno / ir3 / ir3_compiler.c
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2
3 /*
4 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 * Authors:
26 * Rob Clark <robclark@freedesktop.org>
27 */
28
29 #include <stdarg.h>
30
31 #include "pipe/p_state.h"
32 #include "util/u_string.h"
33 #include "util/u_memory.h"
34 #include "util/u_inlines.h"
35 #include "tgsi/tgsi_lowering.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_ureg.h"
38 #include "tgsi/tgsi_info.h"
39 #include "tgsi/tgsi_strings.h"
40 #include "tgsi/tgsi_dump.h"
41 #include "tgsi/tgsi_scan.h"
42
43 #include "freedreno_util.h"
44
45 #include "ir3_compiler.h"
46 #include "ir3_shader.h"
47
48 #include "instr-a3xx.h"
49 #include "ir3.h"
50
/* All state for one TGSI -> ir3 compile of a single shader variant. */
struct ir3_compile_context {
	/* token stream being compiled (possibly the lowered copy): */
	const struct tgsi_token *tokens;
	/* true iff 'tokens' was allocated by tgsi_transform_lowering()
	 * and must be freed in compile_free():
	 */
	bool free_tokens;
	struct ir3 *ir;
	struct ir3_shader_variant *so;

	/* current (innermost) block instructions are emitted into: */
	struct ir3_block *block;
	/* most recently created instruction (see instr_create()): */
	struct ir3_instruction *current_instr;

	/* we need to defer updates to block->outputs[] until the end
	 * of an instruction (so we don't see new value until *after*
	 * the src registers are processed)
	 */
	struct {
		struct ir3_instruction *instr, **instrp;
	} output_updates[16];
	unsigned num_output_updates;

	/* are we in a sequence of "atomic" instructions?
	 */
	bool atomic;

	/* For fragment shaders, from the hw perspective the only
	 * actual input is r0.xy position register passed to bary.f.
	 * But TGSI doesn't know that, it still declares things as
	 * IN[] registers.  So we do all the input tracking normally
	 * and fix things up after compile_instructions()
	 *
	 * NOTE that frag_pos is the hardware position (possibly it
	 * is actually an index or tag or some such.. it is *not*
	 * values that can be directly used for gl_FragCoord..)
	 */
	struct ir3_instruction *frag_pos, *frag_face, *frag_coord[4];

	struct tgsi_parse_context parser;
	/* TGSI_PROCESSOR_* of the shader being compiled: */
	unsigned type;

	struct tgsi_shader_info info;

	/* for calculating input/output positions/linkages: */
	unsigned next_inloc;

	/* scratch TEMPs handed out by get_internal_temp(): */
	unsigned num_internal_temps;
	struct tgsi_src_register internal_temps[8];

	/* idx/slot for last compiler generated immediate */
	unsigned immediate_idx;

	/* stack of branch instructions that mark (potentially nested)
	 * branch if/else/loop/etc
	 */
	struct {
		struct ir3_instruction *instr, *cond;
		bool inv;   /* true iff in else leg of branch */
	} branch[16];
	unsigned int branch_count;

	/* list of kill instructions: */
	struct ir3_instruction *kill[16];
	unsigned int kill_count;

	/* used when dst is same as one of the src, to avoid overwriting a
	 * src element before the remaining scalar instructions that make
	 * up the vector operation
	 */
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;

	/* just for catching incorrect use of get_dst()/put_dst():
	 */
	bool using_tmp_dst;
};
123
124
125 static void vectorize(struct ir3_compile_context *ctx,
126 struct ir3_instruction *instr, struct tgsi_dst_register *dst,
127 int nsrcs, ...);
128 static void create_mov(struct ir3_compile_context *ctx,
129 struct tgsi_dst_register *dst, struct tgsi_src_register *src);
130 static type_t get_ftype(struct ir3_compile_context *ctx);
131
/* Initialize the compile context for one shader variant: run the TGSI
 * lowering passes (per the variant's shader key), reset per-compile
 * state, and start the token parser.
 *
 * Returns TGSI_PARSE_OK on success or a TGSI_PARSE_* error code.
 */
static unsigned
compile_init(struct ir3_compile_context *ctx, struct ir3_shader_variant *so,
		const struct tgsi_token *tokens)
{
	unsigned ret;
	struct tgsi_shader_info *info = &ctx->info;
	/* opcodes the backend has no native support for get lowered here: */
	struct tgsi_lowering_config lconfig = {
			.color_two_side = so->key.color_two_side,
			.lower_DST = true,
			.lower_XPD = true,
			.lower_SCS = true,
			.lower_LRP = true,
			.lower_FRC = true,
			.lower_POW = true,
			.lower_LIT = true,
			.lower_EXP = true,
			.lower_LOG = true,
			.lower_DP4 = true,
			.lower_DP3 = true,
			.lower_DPH = true,
			.lower_DP2 = true,
			.lower_DP2A = true,
	};

	/* saturate lowering config comes from the stage-specific shader key: */
	switch (so->type) {
	case SHADER_FRAGMENT:
	case SHADER_COMPUTE:
		lconfig.saturate_s = so->key.fsaturate_s;
		lconfig.saturate_t = so->key.fsaturate_t;
		lconfig.saturate_r = so->key.fsaturate_r;
		break;
	case SHADER_VERTEX:
		lconfig.saturate_s = so->key.vsaturate_s;
		lconfig.saturate_t = so->key.vsaturate_t;
		lconfig.saturate_r = so->key.vsaturate_r;
		break;
	}

	/* returns NULL if no lowering was needed (tokens unchanged): */
	ctx->tokens = tgsi_transform_lowering(&lconfig, tokens, &ctx->info);
	ctx->free_tokens = !!ctx->tokens;
	if (!ctx->tokens) {
		/* no lowering */
		ctx->tokens = tokens;
	}
	ctx->ir = so->ir;
	ctx->so = so;
	ctx->next_inloc = 8;
	ctx->num_internal_temps = 0;
	ctx->branch_count = 0;
	ctx->kill_count = 0;
	ctx->block = NULL;
	ctx->current_instr = NULL;
	ctx->num_output_updates = 0;
	ctx->atomic = false;
	ctx->frag_pos = NULL;
	ctx->frag_face = NULL;
	ctx->tmp_src = NULL;
	ctx->using_tmp_dst = false;

	memset(ctx->frag_coord, 0, sizeof(ctx->frag_coord));

#define FM(x) (1 << TGSI_FILE_##x)
	/* optimize can't deal with relative addressing: */
	if (info->indirect_files & (FM(TEMPORARY) | FM(INPUT) | FM(OUTPUT)))
		return TGSI_PARSE_ERROR;

	/* NOTE: if relative addressing is used, we set constlen in
	 * the compiler (to worst-case value) since we don't know in
	 * the assembler what the max addr reg value can be:
	 */
	if (info->indirect_files & FM(CONSTANT))
		so->constlen = 4 * (ctx->info.file_max[TGSI_FILE_CONSTANT] + 1);

	/* Immediates go after constants: */
	so->first_immediate = info->file_max[TGSI_FILE_CONSTANT] + 1;
	ctx->immediate_idx = 4 * (ctx->info.file_max[TGSI_FILE_IMMEDIATE] + 1);

	ret = tgsi_parse_init(&ctx->parser, ctx->tokens);
	if (ret != TGSI_PARSE_OK)
		return ret;

	ctx->type = ctx->parser.FullHeader.Processor.Processor;

	return ret;
}
217
/* Report a compile error: print the formatted message, dump the TGSI
 * being compiled for context, and assert (fatal in debug builds).
 */
static void
compile_error(struct ir3_compile_context *ctx, const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	_debug_vprintf(format, ap);
	va_end(ap);
	tgsi_dump(ctx->tokens, 0);
	debug_assert(0);
}
228
/* assert-with-context: on failure routes through compile_error() so the
 * offending TGSI gets dumped along with the failed condition:
 */
#define compile_assert(ctx, cond) do { \
		if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
	} while (0)
232
233 static void
234 compile_free(struct ir3_compile_context *ctx)
235 {
236 if (ctx->free_tokens)
237 free((void *)ctx->tokens);
238 tgsi_parse_free(&ctx->parser);
239 }
240
/* Table entry mapping one TGSI opcode to its translation handler. */
struct instr_translater {
	/* handler emitting ir3 for this TGSI instruction: */
	void (*fxn)(const struct instr_translater *t,
			struct ir3_compile_context *ctx,
			struct tgsi_full_instruction *inst);
	unsigned tgsi_opc;
	opc_t opc;
	opc_t hopc;    /* opc to use for half_precision mode, if different */
	unsigned arg;
};
250
251 static void
252 instr_finish(struct ir3_compile_context *ctx)
253 {
254 unsigned i;
255
256 if (ctx->atomic)
257 return;
258
259 for (i = 0; i < ctx->num_output_updates; i++)
260 *(ctx->output_updates[i].instrp) = ctx->output_updates[i].instr;
261
262 ctx->num_output_updates = 0;
263 }
264
265 /* For "atomic" groups of instructions, for example the four scalar
266 * instructions to perform a vec4 operation. Basically this just
267 * blocks out handling of output_updates so the next scalar instruction
268 * still sees the result from before the start of the atomic group.
269 *
270 * NOTE: when used properly, this could probably replace get/put_dst()
271 * stuff.
272 */
/* Begin an atomic group: defer output updates until instr_atomic_end(). */
static void
instr_atomic_start(struct ir3_compile_context *ctx)
{
	ctx->atomic = true;
}
278
/* End an atomic group and flush the updates deferred since
 * instr_atomic_start().
 */
static void
instr_atomic_end(struct ir3_compile_context *ctx)
{
	ctx->atomic = false;
	instr_finish(ctx);
}
285
/* Create a new instruction in the current block, first flushing any
 * deferred output updates from the previous instruction.
 */
static struct ir3_instruction *
instr_create(struct ir3_compile_context *ctx, int category, opc_t opc)
{
	instr_finish(ctx);
	return (ctx->current_instr = ir3_instr_create(ctx->block, category, opc));
}
292
/* Clone an existing instruction (used by vectorize() to replicate the
 * scalar op per channel), flushing deferred updates first.
 */
static struct ir3_instruction *
instr_clone(struct ir3_compile_context *ctx, struct ir3_instruction *instr)
{
	instr_finish(ctx);
	return (ctx->current_instr = ir3_instr_clone(instr));
}
299
/* Create a new block (outermost shader block, or a nested block for
 * flow control) and make it current.  Register-file sizes are derived
 * from the TGSI declaration info.
 */
static struct ir3_block *
push_block(struct ir3_compile_context *ctx)
{
	struct ir3_block *block;
	unsigned ntmp, nin, nout;

	/* scalar register count for a TGSI file (4 scalars per vec4 reg): */
#define SCALAR_REGS(file) (4 * (ctx->info.file_max[TGSI_FILE_ ## file] + 1))

	/* hmm, give ourselves room to create 8 extra temporaries (vec4):
	 */
	ntmp = SCALAR_REGS(TEMPORARY);
	ntmp += 8 * 4;

	nout = SCALAR_REGS(OUTPUT);
	nin  = SCALAR_REGS(INPUT);

	/* for outermost block, 'inputs' are the actual shader INPUT
	 * register file.  Reads from INPUT registers always go back to
	 * top block.  For nested blocks, 'inputs' is used to track any
	 * TEMPORARY file register from one of the enclosing blocks that
	 * is ready in this block.
	 */
	if (!ctx->block) {
		/* NOTE: fragment shaders actually have two inputs (r0.xy, the
		 * position)
		 */
		if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
			int n = 2;
			if (ctx->info.reads_position)
				n += 4;
			if (ctx->info.uses_frontface)
				n += 4;
			nin = MAX2(n, nin);
			/* reserve extra output slots for kill instructions: */
			nout += ARRAY_SIZE(ctx->kill);
		}
	} else {
		nin = ntmp;
	}

	block = ir3_block_create(ctx->ir, ntmp, nin, nout);

	/* the kill slots are allocated but not counted as real outputs: */
	if ((ctx->type == TGSI_PROCESSOR_FRAGMENT) && !ctx->block)
		block->noutputs -= ARRAY_SIZE(ctx->kill);

	block->parent = ctx->block;
	ctx->block = block;

	return block;
}
349
/* Restore the parent block as current; must never pop the outermost
 * block (hence the assert).
 */
static void
pop_block(struct ir3_compile_context *ctx)
{
	ctx->block = ctx->block->parent;
	compile_assert(ctx, ctx->block);
}
356
357 static struct ir3_instruction *
358 create_output(struct ir3_block *block, struct ir3_instruction *instr,
359 unsigned n)
360 {
361 struct ir3_instruction *out;
362
363 out = ir3_instr_create(block, -1, OPC_META_OUTPUT);
364 out->inout.block = block;
365 ir3_reg_create(out, n, 0);
366 if (instr)
367 ir3_reg_create(out, 0, IR3_REG_SSA)->instr = instr;
368
369 return out;
370 }
371
372 static struct ir3_instruction *
373 create_input(struct ir3_block *block, struct ir3_instruction *instr,
374 unsigned n)
375 {
376 struct ir3_instruction *in;
377
378 in = ir3_instr_create(block, -1, OPC_META_INPUT);
379 in->inout.block = block;
380 ir3_reg_create(in, n, 0);
381 if (instr)
382 ir3_reg_create(in, 0, IR3_REG_SSA)->instr = instr;
383
384 return in;
385 }
386
387 static struct ir3_instruction *
388 block_input(struct ir3_block *block, unsigned n)
389 {
390 /* references to INPUT register file always go back up to
391 * top level:
392 */
393 if (block->parent)
394 return block_input(block->parent, n);
395 return block->inputs[n];
396 }
397
398 /* return temporary in scope, creating if needed meta-input node
399 * to track block inputs
400 */
/* return temporary in scope, creating if needed meta-input node
 * to track block inputs
 */
static struct ir3_instruction *
block_temporary(struct ir3_block *block, unsigned n)
{
	/* references to TEMPORARY register file, find the nearest
	 * enclosing block which has already assigned this temporary,
	 * creating meta-input instructions along the way to keep
	 * track of block inputs
	 */
	if (block->parent && !block->temporaries[n]) {
		/* if already have input for this block, reuse: */
		if (!block->inputs[n])
			block->inputs[n] = block_temporary(block->parent, n);

		/* and create new input to return: */
		return create_input(block, block->inputs[n], n);
	}
	/* NULL here means the temporary is read-before-write; ssa_src()
	 * substitutes an immediate 0.0 in that case:
	 */
	return block->temporaries[n];
}
419
/* Emit a float mov-immediate producing 'val'. */
static struct ir3_instruction *
create_immed(struct ir3_compile_context *ctx, float val)
{
	/* NOTE: *don't* use instr_create() here!
	 */
	struct ir3_instruction *instr;
	instr = ir3_instr_create(ctx->block, 1, 0);   /* cat1 = mov */
	instr->cat1.src_type = get_ftype(ctx);
	instr->cat1.dst_type = get_ftype(ctx);
	ir3_reg_create(instr, 0, 0);
	ir3_reg_create(instr, 0, IR3_REG_IMMED)->fim_val = val;
	return instr;
}
433
/* Record 'instr' as the new SSA value for one scalar channel of a TGSI
 * dst register.  The update is queued and only committed by
 * instr_finish(), so srcs of the current instruction still see the old
 * value.
 */
static void
ssa_dst(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_dst_register *dst, unsigned chan)
{
	unsigned n = regid(dst->Index, chan);
	unsigned idx = ctx->num_output_updates;

	compile_assert(ctx, idx < ARRAY_SIZE(ctx->output_updates));

	/* NOTE: defer update of temporaries[idx] or output[idx]
	 * until instr_finish(), so that if the current instruction
	 * reads the same TEMP/OUT[] it gets the old value:
	 *
	 * bleh.. this might be a bit easier to just figure out
	 * in instr_finish().  But at that point we've already
	 * lost information about OUTPUT vs TEMPORARY register
	 * file..
	 */

	switch (dst->File) {
	case TGSI_FILE_OUTPUT:
		compile_assert(ctx, n < ctx->block->noutputs);
		ctx->output_updates[idx].instrp = &ctx->block->outputs[n];
		ctx->output_updates[idx].instr = instr;
		ctx->num_output_updates++;
		break;
	case TGSI_FILE_TEMPORARY:
		compile_assert(ctx, n < ctx->block->ntemporaries);
		ctx->output_updates[idx].instrp = &ctx->block->temporaries[n];
		ctx->output_updates[idx].instr = instr;
		ctx->num_output_updates++;
		break;
	case TGSI_FILE_ADDRESS:
		/* only a0.x exists: */
		compile_assert(ctx, n < 1);
		ctx->output_updates[idx].instrp = &ctx->block->address;
		ctx->output_updates[idx].instr = instr;
		ctx->num_output_updates++;
		break;
	}
}
474
/* Wire up the SSA producer for one scalar channel of a TGSI src
 * register, substituting an immediate 0.0 for reads of channels that
 * were never written.
 */
static void
ssa_src(struct ir3_compile_context *ctx, struct ir3_register *reg,
		const struct tgsi_src_register *src, unsigned chan)
{
	struct ir3_block *block = ctx->block;
	unsigned n = regid(src->Index, chan);

	switch (src->File) {
	case TGSI_FILE_INPUT:
		reg->flags |= IR3_REG_SSA;
		reg->instr = block_input(ctx->block, n);
		break;
	case TGSI_FILE_OUTPUT:
		/* really this should just happen in case of 'MOV_SAT OUT[n], ..',
		 * for the following clamp instructions:
		 */
		reg->flags |= IR3_REG_SSA;
		reg->instr = block->outputs[n];
		/* we don't have to worry about read from an OUTPUT that was
		 * assigned outside of the current block, because the _SAT
		 * clamp instructions will always be in the same block as
		 * the original instruction which wrote the OUTPUT
		 */
		compile_assert(ctx, reg->instr);
		break;
	case TGSI_FILE_TEMPORARY:
		reg->flags |= IR3_REG_SSA;
		reg->instr = block_temporary(ctx->block, n);
		break;
	}

	if ((reg->flags & IR3_REG_SSA) && !reg->instr) {
		/* this can happen when registers (or components of a TGSI
		 * register) are used as src before they have been assigned
		 * (undefined contents).  To avoid confusing the rest of the
		 * compiler, and to generally keep things peachy, substitute
		 * an instruction that sets the src to 0.0.  Or to keep
		 * things undefined, I could plug in a random number? :-P
		 *
		 * NOTE: *don't* use instr_create() here!
		 */
		reg->instr = create_immed(ctx, 0.0);
	}
}
519
/* Add a dst register to 'instr' for the given TGSI dst/channel, with a
 * write-mask.  For multi-channel writes, per-channel META_FO "fan-out"
 * nodes are created so each written channel has its own SSA def.
 */
static struct ir3_register *
add_dst_reg_wrmask(struct ir3_compile_context *ctx,
		struct ir3_instruction *instr, const struct tgsi_dst_register *dst,
		unsigned chan, unsigned wrmask)
{
	unsigned flags = 0, num = 0;
	struct ir3_register *reg;

	switch (dst->File) {
	case TGSI_FILE_OUTPUT:
	case TGSI_FILE_TEMPORARY:
		/* uses SSA */
		break;
	case TGSI_FILE_ADDRESS:
		flags |= IR3_REG_ADDR;
		/* uses SSA */
		break;
	default:
		compile_error(ctx, "unsupported dst register file: %s\n",
			tgsi_file_name(dst->File));
		break;
	}

	if (dst->Indirect)
		flags |= IR3_REG_RELATIV;

	reg = ir3_reg_create(instr, regid(num, chan), flags);

	/* NOTE: do not call ssa_dst() if atomic.. vectorize()
	 * itself will call ssa_dst().  This is to filter out
	 * the (initially bogus) .x component dst which is
	 * created (but not necessarily used, ie. if the net
	 * result of the vector operation does not write to
	 * the .x component)
	 */

	reg->wrmask = wrmask;
	if (wrmask == 0x1) {
		/* normal case */
		if (!ctx->atomic)
			ssa_dst(ctx, instr, dst, chan);
	} else if ((dst->File == TGSI_FILE_TEMPORARY) ||
			(dst->File == TGSI_FILE_OUTPUT) ||
			(dst->File == TGSI_FILE_ADDRESS)) {
		unsigned i;

		/* if instruction writes multiple, we need to create
		 * some place-holder collect the registers:
		 */
		for (i = 0; i < 4; i++) {
			if (wrmask & (1 << i)) {
				struct ir3_instruction *collect =
						ir3_instr_create(ctx->block, -1, OPC_META_FO);
				collect->fo.off = i;
				/* unused dst reg: */
				ir3_reg_create(collect, 0, 0);
				/* and src reg used to hold original instr */
				ir3_reg_create(collect, 0, IR3_REG_SSA)->instr = instr;
				if (!ctx->atomic)
					ssa_dst(ctx, collect, dst, chan+i);
			}
		}
	}

	return reg;
}
586
/* Convenience wrapper: single-channel (wrmask=.x) dst register. */
static struct ir3_register *
add_dst_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_dst_register *dst, unsigned chan)
{
	return add_dst_reg_wrmask(ctx, instr, dst, chan, 0x1);
}
593
594 static struct ir3_register *
595 add_src_reg_wrmask(struct ir3_compile_context *ctx,
596 struct ir3_instruction *instr, const struct tgsi_src_register *src,
597 unsigned chan, unsigned wrmask)
598 {
599 unsigned flags = 0, num = 0;
600 struct ir3_register *reg;
601 struct ir3_instruction *orig = NULL;
602
603 /* TODO we need to use a mov to temp for const >= 64.. or maybe
604 * we could use relative addressing..
605 */
606 compile_assert(ctx, src->Index < 64);
607
608 switch (src->File) {
609 case TGSI_FILE_IMMEDIATE:
610 /* TODO if possible, use actual immediate instead of const.. but
611 * TGSI has vec4 immediates, we can only embed scalar (of limited
612 * size, depending on instruction..)
613 */
614 flags |= IR3_REG_CONST;
615 num = src->Index + ctx->so->first_immediate;
616 break;
617 case TGSI_FILE_CONSTANT:
618 flags |= IR3_REG_CONST;
619 num = src->Index;
620 break;
621 case TGSI_FILE_OUTPUT:
622 /* NOTE: we should only end up w/ OUTPUT file for things like
623 * clamp()'ing saturated dst instructions
624 */
625 case TGSI_FILE_INPUT:
626 case TGSI_FILE_TEMPORARY:
627 /* uses SSA */
628 break;
629 default:
630 compile_error(ctx, "unsupported src register file: %s\n",
631 tgsi_file_name(src->File));
632 break;
633 }
634
635 if (src->Absolute)
636 flags |= IR3_REG_ABS;
637 if (src->Negate)
638 flags |= IR3_REG_NEGATE;
639
640 if (src->Indirect) {
641 flags |= IR3_REG_RELATIV;
642
643 /* shouldn't happen, and we can't cope with it below: */
644 compile_assert(ctx, wrmask == 0x1);
645
646 /* wrap in a meta-deref to track both the src and address: */
647 orig = instr;
648
649 instr = ir3_instr_create(ctx->block, -1, OPC_META_DEREF);
650 ir3_reg_create(instr, 0, 0);
651 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->block->address;
652 }
653
654 reg = ir3_reg_create(instr, regid(num, chan), flags);
655
656 reg->wrmask = wrmask;
657 if (wrmask == 0x1) {
658 /* normal case */
659 ssa_src(ctx, reg, src, chan);
660 } else if ((src->File == TGSI_FILE_TEMPORARY) ||
661 (src->File == TGSI_FILE_OUTPUT) ||
662 (src->File == TGSI_FILE_INPUT)) {
663 struct ir3_instruction *collect;
664 unsigned i;
665
666 compile_assert(ctx, !src->Indirect);
667
668 /* if instruction reads multiple, we need to create
669 * some place-holder collect the registers:
670 */
671 collect = ir3_instr_create(ctx->block, -1, OPC_META_FI);
672 ir3_reg_create(collect, 0, 0); /* unused dst reg */
673
674 for (i = 0; i < 4; i++) {
675 if (wrmask & (1 << i)) {
676 /* and src reg used point to the original instr */
677 ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
678 src, chan + i);
679 } else if (wrmask & ~((i << i) - 1)) {
680 /* if any remaining components, then dummy
681 * placeholder src reg to fill in the blanks:
682 */
683 ir3_reg_create(collect, 0, 0);
684 }
685 }
686
687 reg->flags |= IR3_REG_SSA;
688 reg->instr = collect;
689 }
690
691 if (src->Indirect) {
692 reg = ir3_reg_create(orig, 0, flags | IR3_REG_SSA);
693 reg->instr = instr;
694 }
695 return reg;
696 }
697
/* Convenience wrapper: single-channel (wrmask=.x) src register. */
static struct ir3_register *
add_src_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_src_register *src, unsigned chan)
{
	return add_src_reg_wrmask(ctx, instr, src, chan, 0x1);
}
704
705 static void
706 src_from_dst(struct tgsi_src_register *src, struct tgsi_dst_register *dst)
707 {
708 src->File = dst->File;
709 src->Indirect = dst->Indirect;
710 src->Dimension = dst->Dimension;
711 src->Index = dst->Index;
712 src->Absolute = 0;
713 src->Negate = 0;
714 src->SwizzleX = TGSI_SWIZZLE_X;
715 src->SwizzleY = TGSI_SWIZZLE_Y;
716 src->SwizzleZ = TGSI_SWIZZLE_Z;
717 src->SwizzleW = TGSI_SWIZZLE_W;
718 }
719
/* Get internal-temp src/dst to use for a sequence of instructions
 * generated by a single TGSI op.
 */
static struct tgsi_src_register *
get_internal_temp(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *tmp_dst)
{
	struct tgsi_src_register *tmp_src;
	int n;

	tmp_dst->File      = TGSI_FILE_TEMPORARY;
	tmp_dst->WriteMask = TGSI_WRITEMASK_XYZW;
	tmp_dst->Indirect  = 0;
	tmp_dst->Dimension = 0;

	/* assign next temporary: */
	n = ctx->num_internal_temps++;
	compile_assert(ctx, n < ARRAY_SIZE(ctx->internal_temps));
	tmp_src = &ctx->internal_temps[n];

	/* internal temps live just past the shader-declared TEMP range: */
	tmp_dst->Index = ctx->info.file_max[TGSI_FILE_TEMPORARY] + n + 1;

	src_from_dst(tmp_src, tmp_dst);

	return tmp_src;
}
746
747 static inline bool
748 is_const(struct tgsi_src_register *src)
749 {
750 return (src->File == TGSI_FILE_CONSTANT) ||
751 (src->File == TGSI_FILE_IMMEDIATE);
752 }
753
/* true if the src uses relative (a0-indexed) addressing: */
static inline bool
is_relative(struct tgsi_src_register *src)
{
	return src->Indirect;
}
759
/* true if the src cannot feed instructions that need a plain GPR
 * operand (see get_unconst()):
 */
static inline bool
is_rel_or_const(struct tgsi_src_register *src)
{
	return is_relative(src) || is_const(src);
}
765
/* float type used for moves/conversions (full precision): */
static type_t
get_ftype(struct ir3_compile_context *ctx)
{
	return TYPE_F32;
}
771
/* unsigned integer type used for moves/conversions: */
static type_t
get_utype(struct ir3_compile_context *ctx)
{
	return TYPE_U32;
}
777
/* signed integer type used for moves/conversions: */
static type_t
get_stype(struct ir3_compile_context *ctx)
{
	return TYPE_S32;
}
783
784 static unsigned
785 src_swiz(struct tgsi_src_register *src, int chan)
786 {
787 switch (chan) {
788 case 0: return src->SwizzleX;
789 case 1: return src->SwizzleY;
790 case 2: return src->SwizzleZ;
791 case 3: return src->SwizzleW;
792 }
793 assert(0);
794 return 0;
795 }
796
/* for instructions that cannot take a const register as src, if needed
 * generate a move to temporary gpr:
 */
static struct tgsi_src_register *
get_unconst(struct ir3_compile_context *ctx, struct tgsi_src_register *src)
{
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;

	compile_assert(ctx, is_rel_or_const(src));

	tmp_src = get_internal_temp(ctx, &tmp_dst);

	/* mov the const/relative value into the fresh temp: */
	create_mov(ctx, &tmp_dst, src);

	return tmp_src;
}
814
/* Materialize a compiler-generated immediate: reuse an existing slot in
 * so->immediates[] if 'val' (or its negation) is already there,
 * otherwise append a new scalar slot.  'reg' is filled in to reference
 * the slot with a broadcast swizzle.
 */
static void
get_immediate(struct ir3_compile_context *ctx,
		struct tgsi_src_register *reg, uint32_t val)
{
	unsigned neg, swiz, idx, i;
	/* actually maps 1:1 currently.. not sure if that is safe to rely on: */
	static const unsigned swiz2tgsi[] = {
			TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
	};

	/* search already-emitted scalar immediates for a match: */
	for (i = 0; i < ctx->immediate_idx; i++) {
		swiz = i % 4;
		idx  = i / 4;

		if (ctx->so->immediates[idx].val[swiz] == val) {
			neg = 0;
			break;
		}

		/* negated match also works, via the src Negate modifier: */
		if (ctx->so->immediates[idx].val[swiz] == -val) {
			neg = 1;
			break;
		}
	}

	if (i == ctx->immediate_idx) {
		/* need to generate a new immediate: */
		swiz = i % 4;
		idx  = i / 4;
		neg  = 0;
		ctx->so->immediates[idx].val[swiz] = val;
		ctx->so->immediates_count = idx + 1;
		ctx->immediate_idx++;
	}

	reg->File      = TGSI_FILE_IMMEDIATE;
	reg->Indirect  = 0;
	reg->Dimension = 0;
	reg->Index     = idx;
	reg->Absolute  = 0;
	reg->Negate    = neg;
	/* broadcast the scalar slot to all four channels: */
	reg->SwizzleX = swiz2tgsi[swiz];
	reg->SwizzleY = swiz2tgsi[swiz];
	reg->SwizzleZ = swiz2tgsi[swiz];
	reg->SwizzleW = swiz2tgsi[swiz];
}
861
/* Emit per-channel moves from 'src' to 'dst' for every channel in the
 * dst write-mask, honoring the src swizzle.
 */
static void
create_mov(struct ir3_compile_context *ctx, struct tgsi_dst_register *dst,
		struct tgsi_src_register *src)
{
	type_t type_mov = get_ftype(ctx);
	unsigned i;

	for (i = 0; i < 4; i++) {
		/* move to destination: */
		if (dst->WriteMask & (1 << i)) {
			struct ir3_instruction *instr;

			if (src->Absolute || src->Negate) {
				/* can't have abs or neg on a mov instr, so use
				 * absneg.f instead to handle these cases:
				 */
				instr = instr_create(ctx, 2, OPC_ABSNEG_F);
			} else {
				instr = instr_create(ctx, 1, 0);
				instr->cat1.src_type = type_mov;
				instr->cat1.dst_type = type_mov;
			}

			add_dst_reg(ctx, instr, dst, i);
			add_src_reg(ctx, instr, src, src_swiz(src, i));
		}
	}
}
890
/* dst = min(max(val, minval), maxval), emitted as a max.f followed by a
 * min.f over the dst write-mask.
 */
static void
create_clamp(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *dst, struct tgsi_src_register *val,
		struct tgsi_src_register *minval, struct tgsi_src_register *maxval)
{
	struct ir3_instruction *instr;

	instr = instr_create(ctx, 2, OPC_MAX_F);
	vectorize(ctx, instr, dst, 2, val, 0, minval, 0);

	instr = instr_create(ctx, 2, OPC_MIN_F);
	vectorize(ctx, instr, dst, 2, val, 0, maxval, 0);
}
904
/* Clamp 'dst' in place between two immediate values (raw 32-bit
 * bit-patterns, see get_immediate()).
 */
static void
create_clamp_imm(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *dst,
		uint32_t minval, uint32_t maxval)
{
	struct tgsi_src_register minconst, maxconst;
	struct tgsi_src_register src;

	/* read back the value just written to dst: */
	src_from_dst(&src, dst);

	get_immediate(ctx, &minconst, minval);
	get_immediate(ctx, &maxconst, maxval);

	create_clamp(ctx, dst, &src, &minconst, &maxconst);
}
920
/* Return the dst to emit into for 'inst'.  If the dst overlaps one of
 * the srcs (other than a pure pass-through identity case), redirect to
 * an internal temporary so the per-channel scalar expansion doesn't
 * clobber src channels it still needs to read; put_dst() later moves
 * the temp into the real dst.
 */
static struct tgsi_dst_register *
get_dst(struct ir3_compile_context *ctx, struct tgsi_full_instruction *inst)
{
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	unsigned i;

	/* get_dst()/put_dst() must be strictly paired: */
	compile_assert(ctx, !ctx->using_tmp_dst);
	ctx->using_tmp_dst = true;

	for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
		struct tgsi_src_register *src = &inst->Src[i].Register;
		if ((src->File == dst->File) && (src->Index == dst->Index)) {
			/* full write with identity swizzle can't self-clobber: */
			if ((dst->WriteMask == TGSI_WRITEMASK_XYZW) &&
					(src->SwizzleX == TGSI_SWIZZLE_X) &&
					(src->SwizzleY == TGSI_SWIZZLE_Y) &&
					(src->SwizzleZ == TGSI_SWIZZLE_Z) &&
					(src->SwizzleW == TGSI_SWIZZLE_W))
				continue;
			ctx->tmp_src = get_internal_temp(ctx, &ctx->tmp_dst);
			ctx->tmp_dst.WriteMask = dst->WriteMask;
			dst = &ctx->tmp_dst;
			break;
		}
	}
	return dst;
}
947
/* Counterpart of get_dst(): if a temporary dst was substituted, move
 * its value into the instruction's real dst.
 */
static void
put_dst(struct ir3_compile_context *ctx, struct tgsi_full_instruction *inst,
		struct tgsi_dst_register *dst)
{
	compile_assert(ctx, ctx->using_tmp_dst);
	ctx->using_tmp_dst = false;

	/* if necessary, add mov back into original dst: */
	if (dst != &inst->Dst[0].Register) {
		create_mov(ctx, &inst->Dst[0].Register, ctx->tmp_src);
	}
}
960
/* helper to generate the necessary repeat and/or additional instructions
 * to turn a scalar instruction into a vector operation:
 *
 * 'instr' has been created with no registers yet; varargs are 'nsrcs'
 * pairs of (struct tgsi_src_register *, unsigned flags).  A src pair
 * with IR3_REG_IMMED in flags passes the integer immediate *in the
 * pointer slot* (cast below).  The first written channel reuses
 * 'instr'; each further channel is a clone with dst/src components
 * patched to that channel's swizzle.
 */
static void
vectorize(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		struct tgsi_dst_register *dst, int nsrcs, ...)
{
	va_list ap;
	int i, j, n = 0;

	/* group the whole expansion so deferred dst updates don't become
	 * visible between the per-channel scalar ops:
	 */
	instr_atomic_start(ctx);

	/* provisional .x dst; fixed up per channel below: */
	add_dst_reg(ctx, instr, dst, TGSI_SWIZZLE_X);

	va_start(ap, nsrcs);
	for (j = 0; j < nsrcs; j++) {
		struct tgsi_src_register *src =
				va_arg(ap, struct tgsi_src_register *);
		unsigned flags = va_arg(ap, unsigned);
		struct ir3_register *reg;
		if (flags & IR3_REG_IMMED) {
			reg = ir3_reg_create(instr, 0, IR3_REG_IMMED);
			/* this is an ugly cast.. should have put flags first! */
			reg->iim_val = *(int *)&src;
		} else {
			reg = add_src_reg(ctx, instr, src, TGSI_SWIZZLE_X);
		}
		/* NEGATE toggles (xor) rather than sets, so an already-negated
		 * src combined with a NEGATE flag cancels out:
		 */
		reg->flags |= flags & ~IR3_REG_NEGATE;
		if (flags & IR3_REG_NEGATE)
			reg->flags ^= IR3_REG_NEGATE;
	}
	va_end(ap);

	for (i = 0; i < 4; i++) {
		if (dst->WriteMask & (1 << i)) {
			struct ir3_instruction *cur;

			/* first written channel reuses 'instr', rest are clones: */
			if (n++ == 0) {
				cur = instr;
			} else {
				cur = instr_clone(ctx, instr);
			}

			ssa_dst(ctx, cur, dst, i);

			/* fix-up dst register component: */
			cur->regs[0]->num = regid(cur->regs[0]->num >> 2, i);

			/* fix-up src register component: */
			va_start(ap, nsrcs);
			for (j = 0; j < nsrcs; j++) {
				struct ir3_register *reg = cur->regs[j+1];
				struct tgsi_src_register *src =
						va_arg(ap, struct tgsi_src_register *);
				unsigned flags = va_arg(ap, unsigned);
				if (reg->flags & IR3_REG_SSA) {
					ssa_src(ctx, reg, src, src_swiz(src, i));
				} else if (!(flags & IR3_REG_IMMED)) {
					reg->num = regid(reg->num >> 2, src_swiz(src, i));
				}
			}
			va_end(ap);
		}
	}

	instr_atomic_end(ctx);
}
1028
1029 /*
1030 * Handlers for TGSI instructions which do not have a 1:1 mapping to
1031 * native instructions:
1032 */
1033
/* CLAMP dst, src0, src1, src2 -> dst = min(max(src0, src1), src2) */
static void
trans_clamp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *src0 = &inst->Src[0].Register;
	struct tgsi_src_register *src1 = &inst->Src[1].Register;
	struct tgsi_src_register *src2 = &inst->Src[2].Register;

	create_clamp(ctx, dst, src0, src1, src2);

	put_dst(ctx, inst, dst);
}
1048
/* ARL(x) = x, but mova from hrN.x to a0.. */
static void
trans_arl(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *src = &inst->Src[0].Register;
	unsigned chan = src->SwizzleX;

	compile_assert(ctx, dst->File == TGSI_FILE_ADDRESS);

	/* NOTE: we allocate a temporary from a flat register
	 * namespace (ignoring half vs full).  It turns out
	 * not to really matter since registers get reassigned
	 * later in ir3_ra which (hopefully!) can deal a bit
	 * better with mixed half and full precision.
	 */
	tmp_src = get_internal_temp(ctx, &tmp_dst);

	/* cov.{u,f}{32,16}s16 Rtmp, Rsrc -- ARL takes a float src,
	 * UARL (same handler) an unsigned one:
	 */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = (t->tgsi_opc == TGSI_OPCODE_ARL) ?
			get_ftype(ctx) : get_utype(ctx);
	instr->cat1.dst_type = TYPE_S16;
	add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, src, chan);

	/* shl.b Rtmp, Rtmp, 2 -- scale vec4 index to scalar reg units: */
	instr = instr_create(ctx, 2, OPC_SHL_B);
	add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;
	ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 2;

	/* mova a0, Rtmp */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = TYPE_S16;
	instr->cat1.dst_type = TYPE_S16;
	add_dst_reg(ctx, instr, dst, 0)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;
}
1093
1094 /*
1095 * texture fetch/sample instructions:
1096 */
1097
/* Per-instruction texture argument layout, filled in by fill_tex_info(): */
struct tex_info {
	int8_t order[4];           /* src component for each slot of first arg, -1 = unused */
	int8_t args;               /* number of sample-instruction args (1 or 2) */
	unsigned src_wrmask, flags; /* writemask of first arg, IR3_INSTR_* flags */
};
1103
/* Static properties of a TGSI texture target (see tex_targets[] below): */
struct target_info {
	uint8_t dims;    /* number of coordinate dimensions */
	uint8_t cube;    /* 1 if a cube-map target */
	uint8_t array;   /* 1 if an array target (extra layer-index coord) */
	uint8_t shadow;  /* 1 if a shadow target (extra reference coord) */
};
1110
/* Table of TGSI texture targets, indexed by TGSI_TEXTURE_*:
 *                                      dims, cube, array, shadow
 */
static const struct target_info tex_targets[] = {
	[TGSI_TEXTURE_1D]               = { 1, 0, 0, 0 },
	[TGSI_TEXTURE_2D]               = { 2, 0, 0, 0 },
	[TGSI_TEXTURE_3D]               = { 3, 0, 0, 0 },
	[TGSI_TEXTURE_CUBE]             = { 3, 1, 0, 0 },
	[TGSI_TEXTURE_RECT]             = { 2, 0, 0, 0 },
	[TGSI_TEXTURE_SHADOW1D]         = { 1, 0, 0, 1 },
	[TGSI_TEXTURE_SHADOW2D]         = { 2, 0, 0, 1 },
	[TGSI_TEXTURE_SHADOWRECT]       = { 2, 0, 0, 1 },
	[TGSI_TEXTURE_1D_ARRAY]         = { 1, 0, 1, 0 },
	[TGSI_TEXTURE_2D_ARRAY]         = { 2, 0, 1, 0 },
	[TGSI_TEXTURE_SHADOW1D_ARRAY]   = { 1, 0, 1, 1 },
	[TGSI_TEXTURE_SHADOW2D_ARRAY]   = { 2, 0, 1, 1 },
	[TGSI_TEXTURE_SHADOWCUBE]       = { 3, 1, 0, 1 },
	[TGSI_TEXTURE_2D_MSAA]          = { 2, 0, 0, 0 },
	[TGSI_TEXTURE_2D_ARRAY_MSAA]    = { 2, 0, 1, 0 },
	[TGSI_TEXTURE_CUBE_ARRAY]       = { 3, 1, 1, 0 },
	[TGSI_TEXTURE_SHADOWCUBE_ARRAY] = { 3, 1, 1, 1 },
};
1130
/* Fill in tex_info for a texture instruction: instruction flags
 * (3D/array/shadow/projection) derived from the target, the number of
 * sample args, and the component ordering required for the first arg.
 * NOTE: callers are expected to zero-initialize *info first (both
 * current callers memset it).
 */
static void
fill_tex_info(struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst,
		struct tex_info *info)
{
	const struct target_info *tgt = &tex_targets[inst->Texture.Texture];

	if (tgt->dims == 3)
		info->flags |= IR3_INSTR_3D;
	if (tgt->array)
		info->flags |= IR3_INSTR_A;
	if (tgt->shadow)
		info->flags |= IR3_INSTR_S;

	/* opcodes carrying an explicit bias/lod need a second arg: */
	switch (inst->Instruction.Opcode) {
	case TGSI_OPCODE_TXB:
	case TGSI_OPCODE_TXB2:
	case TGSI_OPCODE_TXL:
	case TGSI_OPCODE_TXF:
		info->args = 2;
		break;
	case TGSI_OPCODE_TXP:
		info->flags |= IR3_INSTR_P;
		/* fallthrough */
	case TGSI_OPCODE_TEX:
	case TGSI_OPCODE_TXD:
		info->args = 1;
		break;
	}

	/*
	 * lay out the first argument in the proper order:
	 *  - actual coordinates first
	 *  - array index
	 *  - shadow reference
	 *  - projection w
	 *
	 * bias/lod go into the second arg
	 */
	int arg, pos = 0;
	for (arg = 0; arg < tgt->dims; arg++)
		info->order[arg] = pos++;
	/* 1D targets get a dummy second slot (filled w/ 0.5 or 0 later): */
	if (tgt->dims == 1)
		info->order[pos++] = -1;
	if (tgt->shadow)
		info->order[pos++] = MAX2(arg + tgt->array, 2);
	if (tgt->array)
		info->order[pos++] = arg++;
	if (info->flags & IR3_INSTR_P)
		info->order[pos++] = 3;

	info->src_wrmask = (1 << pos) - 1;

	/* mark the remaining slots unused: */
	for (; pos < 4; pos++)
		info->order[pos] = -1;

	assert(pos <= 4);
}
1189
/* Check whether the src swizzle already delivers components in the
 * layout described by order[]: each active slot i must select component
 * base + order[i], where base is whatever .x selects.  A negative
 * order[i] marks the end of the active slots.
 */
static bool check_swiz(struct tgsi_src_register *src, const int8_t order[4])
{
	unsigned base = src_swiz(src, 0);
	unsigned i;

	for (i = 1; i < 4; i++) {
		if (order[i] < 0)
			break;
		if (src_swiz(src, i) != (base + order[i]))
			return false;
	}

	return true;
}
1198
1199 static bool is_1d(unsigned tex)
1200 {
1201 return tex_targets[tex].dims == 1;
1202 }
1203
/* Return the coordinate src for a texture instruction, inserting mov
 * instructions into a fresh internal temp when the incoming register
 * cannot be consumed directly by the cat5 sample instruction (const or
 * relative src, 1D target needing a synthesized .y, or a swizzle that
 * does not already match the required component order).
 */
static struct tgsi_src_register *
get_tex_coord(struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst,
		const struct tex_info *tinf)
{
	struct tgsi_src_register *coord = &inst->Src[0].Register;
	struct ir3_instruction *instr;
	unsigned tex = inst->Texture.Texture;
	bool needs_mov = false;

	/* cat5 instruction cannot seem to handle const or relative: */
	if (is_rel_or_const(coord))
		needs_mov = true;

	/* 1D textures we fix up w/ 0.5 as 2nd coord: */
	if (is_1d(tex))
		needs_mov = true;

	/* The texture sample instructions need to coord in successive
	 * registers/components (ie. src.xy but not src.yx).  And TXP
	 * needs the .w component in .z for 2D..  so in some cases we
	 * might need to emit some mov instructions to shuffle things
	 * around:
	 */
	if (!needs_mov)
		needs_mov = !check_swiz(coord, tinf->order);

	if (needs_mov) {
		struct tgsi_dst_register tmp_dst;
		struct tgsi_src_register *tmp_src;
		unsigned j;

		type_t type_mov = get_ftype(ctx);

		/* need to move things around: */
		tmp_src = get_internal_temp(ctx, &tmp_dst);

		/* one mov per active slot, pulling from the component that
		 * tinf->order says belongs there:
		 */
		for (j = 0; j < 4; j++) {
			if (tinf->order[j] < 0)
				continue;
			instr = instr_create(ctx, 1, 0);  /* mov */
			instr->cat1.src_type = type_mov;
			instr->cat1.dst_type = type_mov;
			add_dst_reg(ctx, instr, &tmp_dst, j);
			add_src_reg(ctx, instr, coord,
					src_swiz(coord, tinf->order[j]));
		}

		/* fix up .y coord: (integer 0 for TXF texel fetch, 0.5 for
		 * normalized sampling)
		 */
		if (is_1d(tex)) {
			struct ir3_register *imm;
			instr = instr_create(ctx, 1, 0);  /* mov */
			instr->cat1.src_type = type_mov;
			instr->cat1.dst_type = type_mov;
			add_dst_reg(ctx, instr, &tmp_dst, 1);  /* .y */
			imm = ir3_reg_create(instr, 0, IR3_REG_IMMED);
			if (inst->Instruction.Opcode == TGSI_OPCODE_TXF)
				imm->iim_val = 0;
			else
				imm->fim_val = 0.5;
		}

		coord = tmp_src;
	}

	return coord;
}
1271
/* Translate the texture sample opcodes (TEX/TXP/TXB/TXB2/TXL/TXF/TXD):
 * builds the cat5 sample instruction, with its srcs gathered through
 * meta fan-in (OPC_META_FI) instructions.  The first fan-in carries the
 * coordinates (plus derivatives for TXD); the optional second fan-in
 * carries texel offsets and/or the lod/bias arg.
 */
static void
trans_samp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr, *collect;
	struct ir3_register *reg;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *orig, *coord, *samp, *offset, *dpdx, *dpdy;
	struct tgsi_src_register zero;
	const struct target_info *tgt = &tex_targets[inst->Texture.Texture];
	struct tex_info tinf;
	int i;

	memset(&tinf, 0, sizeof(tinf));
	fill_tex_info(ctx, inst, &tinf);
	coord = get_tex_coord(ctx, inst, &tinf);
	get_immediate(ctx, &zero, 0);

	/* locate the sampler src (and, per opcode, the extra srcs): */
	switch (inst->Instruction.Opcode) {
	case TGSI_OPCODE_TXB2:
		orig = &inst->Src[1].Register;
		samp = &inst->Src[2].Register;
		break;
	case TGSI_OPCODE_TXD:
		orig = &inst->Src[0].Register;
		dpdx = &inst->Src[1].Register;
		dpdy = &inst->Src[2].Register;
		samp = &inst->Src[3].Register;
		/* derivatives can't be const/relative either: */
		if (is_rel_or_const(dpdx))
			dpdx = get_unconst(ctx, dpdx);
		if (is_rel_or_const(dpdy))
			dpdy = get_unconst(ctx, dpdy);
		break;
	default:
		orig = &inst->Src[0].Register;
		samp = &inst->Src[1].Register;
		break;
	}
	if (tinf.args > 1 && is_rel_or_const(orig))
		orig = get_unconst(ctx, orig);

	/* scale up integer coords for TXF based on the LOD */
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
		struct tgsi_dst_register tmp_dst;
		struct tgsi_src_register *tmp_src;
		type_t type_mov = get_utype(ctx);

		tmp_src = get_internal_temp(ctx, &tmp_dst);
		/* coord <<= lod (lod taken from the .w chan of orig): */
		for (i = 0; i < tgt->dims; i++) {
			instr = instr_create(ctx, 2, OPC_SHL_B);
			add_dst_reg(ctx, instr, &tmp_dst, i);
			add_src_reg(ctx, instr, coord, src_swiz(coord, i));
			add_src_reg(ctx, instr, orig, orig->SwizzleW);
		}
		/* pad 1D coords out to 2D with a zero .y: */
		if (tgt->dims < 2) {
			instr = instr_create(ctx, 1, 0);
			instr->cat1.src_type = type_mov;
			instr->cat1.dst_type = type_mov;
			add_dst_reg(ctx, instr, &tmp_dst, i);
			add_src_reg(ctx, instr, &zero, 0);
			i++;
		}
		/* the array index is copied through un-shifted: */
		if (tgt->array) {
			instr = instr_create(ctx, 1, 0);
			instr->cat1.src_type = type_mov;
			instr->cat1.dst_type = type_mov;
			add_dst_reg(ctx, instr, &tmp_dst, i);
			add_src_reg(ctx, instr, coord, src_swiz(coord, i));
		}
		coord = tmp_src;
	}

	/* texel offsets are moved to a plain temp (get_unconst) and flag
	 * the instruction with IR3_INSTR_O:
	 */
	if (inst->Texture.NumOffsets) {
		struct tgsi_texture_offset *tex_offset = &inst->TexOffsets[0];
		struct tgsi_src_register offset_src = {0};

		offset_src.File = tex_offset->File;
		offset_src.Index = tex_offset->Index;
		offset_src.SwizzleX = tex_offset->SwizzleX;
		offset_src.SwizzleY = tex_offset->SwizzleY;
		offset_src.SwizzleZ = tex_offset->SwizzleZ;
		offset = get_unconst(ctx, &offset_src);
		tinf.flags |= IR3_INSTR_O;
	}

	instr = instr_create(ctx, 5, t->opc);
	instr->cat5.type = get_ftype(ctx);
	instr->cat5.samp = samp->Index;
	instr->cat5.tex = samp->Index;
	instr->flags |= tinf.flags;

	add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask);

	/* first src: fan-in of the coordinate components: */
	reg = ir3_reg_create(instr, 0, IR3_REG_SSA);

	collect = ir3_instr_create(ctx->block, -1, OPC_META_FI);
	ir3_reg_create(collect, 0, 0);  /* dummy dst */
	for (i = 0; i < 4; i++)
		if (tinf.src_wrmask & (1 << i))
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
					coord, src_swiz(coord, i));
		else if (tinf.src_wrmask & ~((1 << i) - 1))
			ir3_reg_create(collect, 0, 0);  /* hole before later component */

	/* Attach derivatives onto the end of the fan-in. Derivatives start after
	 * the 4th argument, so make sure that fi is padded up to 4 first.
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
		while (collect->regs_count < 5)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
		for (i = 0; i < tgt->dims; i++)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), dpdx, i);
		if (tgt->dims < 2)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
		for (i = 0; i < tgt->dims; i++)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), dpdy, i);
		if (tgt->dims < 2)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
		tinf.src_wrmask |= ((1 << (2 * MAX2(tgt->dims, 2))) - 1) << 4;
	}

	reg->instr = collect;
	reg->wrmask = tinf.src_wrmask;

	/* The second argument contains the offsets, followed by the lod/bias
	 * argument. This is constructed more manually due to the dynamic nature.
	 */
	if (inst->Texture.NumOffsets == 0 && tinf.args == 1)
		return;

	reg = ir3_reg_create(instr, 0, IR3_REG_SSA);

	collect = ir3_instr_create(ctx->block, -1, OPC_META_FI);
	ir3_reg_create(collect, 0, 0);  /* dummy dst */

	if (inst->Texture.NumOffsets) {
		for (i = 0; i < tgt->dims; i++)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
					offset, i);
		if (tgt->dims < 2)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
	}
	/* lod/bias: TXB2 carries it in .x of the extra src, the other
	 * two-arg opcodes carry it in .w of the coord src:
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2)
		ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
				orig, orig->SwizzleX);
	else if (tinf.args > 1)
		ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
				orig, orig->SwizzleW);

	reg->instr = collect;
	reg->wrmask = (1 << (collect->regs_count - 1)) - 1;
}
1425
1426 static void
1427 trans_txq(const struct instr_translater *t,
1428 struct ir3_compile_context *ctx,
1429 struct tgsi_full_instruction *inst)
1430 {
1431 struct ir3_instruction *instr;
1432 struct tgsi_dst_register *dst = &inst->Dst[0].Register;
1433 struct tgsi_src_register *level = &inst->Src[0].Register;
1434 struct tgsi_src_register *samp = &inst->Src[1].Register;
1435 struct tex_info tinf;
1436
1437 memset(&tinf, 0, sizeof(tinf));
1438 fill_tex_info(ctx, inst, &tinf);
1439 if (is_rel_or_const(level))
1440 level = get_unconst(ctx, level);
1441
1442 instr = instr_create(ctx, 5, OPC_GETSIZE);
1443 instr->cat5.type = get_utype(ctx);
1444 instr->cat5.samp = samp->Index;
1445 instr->cat5.tex = samp->Index;
1446 instr->flags |= tinf.flags;
1447
1448 add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask);
1449 add_src_reg_wrmask(ctx, instr, level, level->SwizzleX, 0x1);
1450 }
1451
1452 /* DDX/DDY */
/* DDX/DDY: emit dsx/dsy, splitting the vec4 into two vec2 halves.
 * Requires the src components in identity swizzle order, so shuffle
 * through a temp via mov when they are not.
 */
static void
trans_deriv(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *src = &inst->Src[0].Register;
	static const int8_t order[4] = {0, 1, 2, 3};

	if (!check_swiz(src, order)) {
		struct tgsi_dst_register tmp_dst;
		struct tgsi_src_register *tmp_src;

		tmp_src = get_internal_temp(ctx, &tmp_dst);
		create_mov(ctx, &tmp_dst, src);

		src = tmp_src;
	}

	/* This might be a workaround for hw bug?  Blob compiler always
	 * seems to work two components at a time for dsy/dsx.  It does
	 * actually seem to work in some cases (or at least some piglit
	 * tests) for four components at a time.  But seems more reliable
	 * to split this into two instructions like the blob compiler
	 * does:
	 */

	/* first half: .xy components, per writemask: */
	instr = instr_create(ctx, 5, t->opc);
	instr->cat5.type = get_ftype(ctx);
	add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask & 0x3);
	add_src_reg_wrmask(ctx, instr, src, 0, dst->WriteMask & 0x3);

	/* second half: .zw components, per writemask: */
	instr = instr_create(ctx, 5, t->opc);
	instr->cat5.type = get_ftype(ctx);
	add_dst_reg_wrmask(ctx, instr, dst, 2, (dst->WriteMask >> 2) & 0x3);
	add_src_reg_wrmask(ctx, instr, src, 2, (dst->WriteMask >> 2) & 0x3);
}
1491
1492 /*
1493 * SEQ(a,b) = (a == b) ? 1.0 : 0.0
1494 * cmps.f.eq tmp0, a, b
1495 * cov.u16f16 dst, tmp0
1496 *
1497 * SNE(a,b) = (a != b) ? 1.0 : 0.0
1498 * cmps.f.ne tmp0, a, b
1499 * cov.u16f16 dst, tmp0
1500 *
1501 * SGE(a,b) = (a >= b) ? 1.0 : 0.0
1502 * cmps.f.ge tmp0, a, b
1503 * cov.u16f16 dst, tmp0
1504 *
1505 * SLE(a,b) = (a <= b) ? 1.0 : 0.0
1506 * cmps.f.le tmp0, a, b
1507 * cov.u16f16 dst, tmp0
1508 *
1509 * SGT(a,b) = (a > b) ? 1.0 : 0.0
1510 * cmps.f.gt tmp0, a, b
1511 * cov.u16f16 dst, tmp0
1512 *
1513 * SLT(a,b) = (a < b) ? 1.0 : 0.0
1514 * cmps.f.lt tmp0, a, b
1515 * cov.u16f16 dst, tmp0
1516 *
1517 * CMP(a,b,c) = (a < 0.0) ? b : c
1518 * cmps.f.lt tmp0, a, {0.0}
1519 * sel.b16 dst, b, tmp0, c
1520 */
/* Float compare opcodes (SEQ/SNE/SGE/SLT/SLE/SGT, their FS* integer-
 * result variants, and CMP): emit cmps.f into a temp, then convert the
 * compare result into the representation the dst opcode expects.  See
 * the expansion table in the comment block above.
 */
static void
trans_cmp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_src_register constval0;
	/* final instruction for CMP() uses orig src1 and src2: */
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *a0, *a1, *a2;
	unsigned condition;

	tmp_src = get_internal_temp(ctx, &tmp_dst);

	a0 = &inst->Src[0].Register;  /* a */
	a1 = &inst->Src[1].Register;  /* b */

	switch (t->tgsi_opc) {
	case TGSI_OPCODE_SEQ:
	case TGSI_OPCODE_FSEQ:
		condition = IR3_COND_EQ;
		break;
	case TGSI_OPCODE_SNE:
	case TGSI_OPCODE_FSNE:
		condition = IR3_COND_NE;
		break;
	case TGSI_OPCODE_SGE:
	case TGSI_OPCODE_FSGE:
		condition = IR3_COND_GE;
		break;
	case TGSI_OPCODE_SLT:
	case TGSI_OPCODE_FSLT:
		condition = IR3_COND_LT;
		break;
	case TGSI_OPCODE_SLE:
		condition = IR3_COND_LE;
		break;
	case TGSI_OPCODE_SGT:
		condition = IR3_COND_GT;
		break;
	case TGSI_OPCODE_CMP:
		/* CMP compares a against 0.0 rather than against src1: */
		get_immediate(ctx, &constval0, fui(0.0));
		a0 = &inst->Src[0].Register;  /* a */
		a1 = &constval0;              /* {0.0} */
		condition = IR3_COND_LT;
		break;
	default:
		compile_assert(ctx, 0);
		return;
	}

	/* presumably both srcs can't be const -- move one to a temp: */
	if (is_const(a0) && is_const(a1))
		a0 = get_unconst(ctx, a0);

	/* cmps.f.<cond> tmp, a0, a1 */
	instr = instr_create(ctx, 2, OPC_CMPS_F);
	instr->cat2.condition = condition;
	vectorize(ctx, instr, &tmp_dst, 2, a0, 0, a1, 0);

	switch (t->tgsi_opc) {
	case TGSI_OPCODE_SEQ:
	case TGSI_OPCODE_SGE:
	case TGSI_OPCODE_SLE:
	case TGSI_OPCODE_SNE:
	case TGSI_OPCODE_SGT:
	case TGSI_OPCODE_SLT:
		/* cov.u16f16 dst, tmp0 -- float 1.0/0.0 result */
		instr = instr_create(ctx, 1, 0);
		instr->cat1.src_type = get_utype(ctx);
		instr->cat1.dst_type = get_ftype(ctx);
		vectorize(ctx, instr, dst, 1, tmp_src, 0);
		break;
	case TGSI_OPCODE_FSEQ:
	case TGSI_OPCODE_FSGE:
	case TGSI_OPCODE_FSNE:
	case TGSI_OPCODE_FSLT:
		/* absneg.s dst, (neg)tmp0 -- integer ~0/0 result */
		instr = instr_create(ctx, 2, OPC_ABSNEG_S);
		vectorize(ctx, instr, dst, 1, tmp_src, IR3_REG_NEGATE);
		break;
	case TGSI_OPCODE_CMP:
		a1 = &inst->Src[1].Register;
		a2 = &inst->Src[2].Register;
		/* sel.{b32,b16} dst, src2, tmp, src1 */
		instr = instr_create(ctx, 3, OPC_SEL_B32);
		vectorize(ctx, instr, dst, 3, a1, 0, tmp_src, 0, a2, 0);

		break;
	}

	put_dst(ctx, inst, dst);
}
1615
1616 /*
1617 * USNE(a,b) = (a != b) ? ~0 : 0
1618 * cmps.u32.ne dst, a, b
1619 *
1620 * USEQ(a,b) = (a == b) ? ~0 : 0
1621 * cmps.u32.eq dst, a, b
1622 *
1623 * ISGE(a,b) = (a > b) ? ~0 : 0
1624 * cmps.s32.ge dst, a, b
1625 *
1626 * USGE(a,b) = (a > b) ? ~0 : 0
1627 * cmps.u32.ge dst, a, b
1628 *
1629 * ISLT(a,b) = (a < b) ? ~0 : 0
1630 * cmps.s32.lt dst, a, b
1631 *
1632 * USLT(a,b) = (a < b) ? ~0 : 0
1633 * cmps.u32.lt dst, a, b
1634 *
1635 */
/* Integer compare opcodes (USNE/USEQ/ISGE/USGE/ISLT/USLT): emit the
 * cmps.{u32,s32} (opc taken from the translater table) into a temp,
 * then absneg.s to turn the 1/0 predicate into the ~0/0 result TGSI
 * expects.  See the expansion table in the comment block above.
 */
static void
trans_icmp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_src_register *a0, *a1;
	unsigned condition;

	a0 = &inst->Src[0].Register;  /* a */
	a1 = &inst->Src[1].Register;  /* b */

	switch (t->tgsi_opc) {
	case TGSI_OPCODE_USNE:
		condition = IR3_COND_NE;
		break;
	case TGSI_OPCODE_USEQ:
		condition = IR3_COND_EQ;
		break;
	case TGSI_OPCODE_ISGE:
	case TGSI_OPCODE_USGE:
		condition = IR3_COND_GE;
		break;
	case TGSI_OPCODE_ISLT:
	case TGSI_OPCODE_USLT:
		condition = IR3_COND_LT;
		break;

	default:
		compile_assert(ctx, 0);
		return;
	}

	/* presumably both srcs can't be const -- move one to a temp: */
	if (is_const(a0) && is_const(a1))
		a0 = get_unconst(ctx, a0);

	tmp_src = get_internal_temp(ctx, &tmp_dst);
	/* cmps.{u32,s32}.<cond> tmp, a0, a1 */
	instr = instr_create(ctx, 2, t->opc);
	instr->cat2.condition = condition;
	vectorize(ctx, instr, &tmp_dst, 2, a0, 0, a1, 0);

	/* absneg.s dst, (neg)tmp -- 1 becomes ~0 (ie. -1): */
	instr = instr_create(ctx, 2, OPC_ABSNEG_S);
	vectorize(ctx, instr, dst, 1, tmp_src, IR3_REG_NEGATE);

	put_dst(ctx, inst, dst);
}
1687
1688 /*
1689 * UCMP(a,b,c) = a ? b : c
1690 * sel.b16 dst, b, a, c
1691 */
1692 static void
1693 trans_ucmp(const struct instr_translater *t,
1694 struct ir3_compile_context *ctx,
1695 struct tgsi_full_instruction *inst)
1696 {
1697 struct ir3_instruction *instr;
1698 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1699 struct tgsi_src_register *a0, *a1, *a2;
1700
1701 a0 = &inst->Src[0].Register; /* a */
1702 a1 = &inst->Src[1].Register; /* b */
1703 a2 = &inst->Src[2].Register; /* c */
1704
1705 if (is_rel_or_const(a0))
1706 a0 = get_unconst(ctx, a0);
1707
1708 /* sel.{b32,b16} dst, b, a, c */
1709 instr = instr_create(ctx, 3, OPC_SEL_B32);
1710 vectorize(ctx, instr, dst, 3, a1, 0, a0, 0, a2, 0);
1711 put_dst(ctx, inst, dst);
1712 }
1713
1714 /*
1715 * ISSG(a) = a < 0 ? -1 : a > 0 ? 1 : 0
1716 * cmps.s.lt tmp_neg, a, 0 # 1 if a is negative
1717 * cmps.s.gt tmp_pos, a, 0 # 1 if a is positive
1718 * sub.u dst, tmp_pos, tmp_neg
1719 */
/* ISSG(a) = a < 0 ? -1 : a > 0 ? 1 : 0 -- see expansion in the comment
 * block above.  NOTE: the (0, IR3_REG_IMMED) pairs passed to vectorize()
 * encode an immediate-zero src operand.
 */
static void
trans_issg(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *a = &inst->Src[0].Register;
	struct tgsi_dst_register neg_dst, pos_dst;
	struct tgsi_src_register *neg_src, *pos_src;

	neg_src = get_internal_temp(ctx, &neg_dst);
	pos_src = get_internal_temp(ctx, &pos_dst);

	/* cmps.s.lt neg, a, 0 */
	instr = instr_create(ctx, 2, OPC_CMPS_S);
	instr->cat2.condition = IR3_COND_LT;
	vectorize(ctx, instr, &neg_dst, 2, a, 0, 0, IR3_REG_IMMED);

	/* cmps.s.gt pos, a, 0 */
	instr = instr_create(ctx, 2, OPC_CMPS_S);
	instr->cat2.condition = IR3_COND_GT;
	vectorize(ctx, instr, &pos_dst, 2, a, 0, 0, IR3_REG_IMMED);

	/* sub.u dst, pos, neg */
	instr = instr_create(ctx, 2, OPC_SUB_U);
	vectorize(ctx, instr, dst, 2, pos_src, 0, neg_src, 0);

	put_dst(ctx, inst, dst);
}
1750
1751
1752
1753 /*
1754 * Conditional / Flow control
1755 */
1756
1757 static void
1758 push_branch(struct ir3_compile_context *ctx, bool inv,
1759 struct ir3_instruction *instr, struct ir3_instruction *cond)
1760 {
1761 unsigned int idx = ctx->branch_count++;
1762 compile_assert(ctx, idx < ARRAY_SIZE(ctx->branch));
1763 ctx->branch[idx].instr = instr;
1764 ctx->branch[idx].inv = inv;
1765 /* else side of branch has same condition: */
1766 if (!inv)
1767 ctx->branch[idx].cond = cond;
1768 }
1769
1770 static struct ir3_instruction *
1771 pop_branch(struct ir3_compile_context *ctx)
1772 {
1773 unsigned int idx = --ctx->branch_count;
1774 return ctx->branch[idx].instr;
1775 }
1776
/* IF/UIF: compare the condition src against zero (cmps.{f,u}.ne, opc
 * from the translater table), feed the result into a meta:flow
 * instruction, and push a new block for the taken side of the branch.
 */
static void
trans_if(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr, *cond;
	struct tgsi_src_register *src = &inst->Src[0].Register;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_src_register constval;

	get_immediate(ctx, &constval, fui(0.0));
	tmp_src = get_internal_temp(ctx, &tmp_dst);

	/* second cmps src is already const ({0.0}), so the condition must
	 * not be:
	 */
	if (is_const(src))
		src = get_unconst(ctx, src);

	/* cmps.{f,u}.ne tmp0, b, {0.0} */
	instr = instr_create(ctx, 2, t->opc);
	add_dst_reg(ctx, instr, &tmp_dst, 0);
	add_src_reg(ctx, instr, src, src->SwizzleX);
	add_src_reg(ctx, instr, &constval, constval.SwizzleX);
	instr->cat2.condition = IR3_COND_NE;

	/* remember the ssa instr producing the condition value, so that an
	 * enclosed unconditional KILL can reuse it (see trans_kill()):
	 */
	compile_assert(ctx, instr->regs[1]->flags & IR3_REG_SSA); /* because get_unconst() */
	cond = instr->regs[1]->instr;

	/* meta:flow tmp0 */
	instr = instr_create(ctx, -1, OPC_META_FLOW);
	ir3_reg_create(instr, 0, 0); /* dummy dst */
	add_src_reg(ctx, instr, tmp_src, TGSI_SWIZZLE_X);

	push_branch(ctx, false, instr, cond);
	instr->flow.if_block = push_block(ctx);
}
1812
1813 static void
1814 trans_else(const struct instr_translater *t,
1815 struct ir3_compile_context *ctx,
1816 struct tgsi_full_instruction *inst)
1817 {
1818 struct ir3_instruction *instr;
1819
1820 pop_block(ctx);
1821
1822 instr = pop_branch(ctx);
1823
1824 compile_assert(ctx, (instr->category == -1) &&
1825 (instr->opc == OPC_META_FLOW));
1826
1827 push_branch(ctx, true, instr, NULL);
1828 instr->flow.else_block = push_block(ctx);
1829 }
1830
1831 static struct ir3_instruction *
1832 find_temporary(struct ir3_block *block, unsigned n)
1833 {
1834 if (block->parent && !block->temporaries[n])
1835 return find_temporary(block->parent, n);
1836 return block->temporaries[n];
1837 }
1838
1839 static struct ir3_instruction *
1840 find_output(struct ir3_block *block, unsigned n)
1841 {
1842 if (block->parent && !block->outputs[n])
1843 return find_output(block->parent, n);
1844 return block->outputs[n];
1845 }
1846
/* Create a meta:phi instruction selecting between a (if side) and
 * b (else side) based on cond (the branch's compare instruction).
 */
static struct ir3_instruction *
create_phi(struct ir3_compile_context *ctx, struct ir3_instruction *cond,
		struct ir3_instruction *a, struct ir3_instruction *b)
{
	struct ir3_instruction *phi;

	compile_assert(ctx, cond);

	/* Either side of the condition could be null..  which
	 * indicates a variable written on only one side of the
	 * branch.  Normally this should only be variables not
	 * used outside of that side of the branch.  So we could
	 * just 'return a ? a : b;' in that case.  But for better
	 * defined undefined behavior we just stick in imm{0.0}.
	 * In the common case of a value only used within the
	 * one side of the branch, the PHI instruction will not
	 * get scheduled
	 */
	if (!a)
		a = create_immed(ctx, 0.0);
	if (!b)
		b = create_immed(ctx, 0.0);

	phi = instr_create(ctx, -1, OPC_META_PHI);
	ir3_reg_create(phi, 0, 0); /* dummy dst */
	ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = cond;
	ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = a;
	ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = b;

	return phi;
}
1878
1879 static void
1880 trans_endif(const struct instr_translater *t,
1881 struct ir3_compile_context *ctx,
1882 struct tgsi_full_instruction *inst)
1883 {
1884 struct ir3_instruction *instr;
1885 struct ir3_block *ifb, *elseb;
1886 struct ir3_instruction **ifout, **elseout;
1887 unsigned i, ifnout = 0, elsenout = 0;
1888
1889 pop_block(ctx);
1890
1891 instr = pop_branch(ctx);
1892
1893 compile_assert(ctx, (instr->category == -1) &&
1894 (instr->opc == OPC_META_FLOW));
1895
1896 ifb = instr->flow.if_block;
1897 elseb = instr->flow.else_block;
1898 /* if there is no else block, the parent block is used for the
1899 * branch-not-taken src of the PHI instructions:
1900 */
1901 if (!elseb)
1902 elseb = ifb->parent;
1903
1904 /* worst case sizes: */
1905 ifnout = ifb->ntemporaries + ifb->noutputs;
1906 elsenout = elseb->ntemporaries + elseb->noutputs;
1907
1908 ifout = ir3_alloc(ctx->ir, sizeof(ifb->outputs[0]) * ifnout);
1909 if (elseb != ifb->parent)
1910 elseout = ir3_alloc(ctx->ir, sizeof(ifb->outputs[0]) * elsenout);
1911
1912 ifnout = 0;
1913 elsenout = 0;
1914
1915 /* generate PHI instructions for any temporaries written: */
1916 for (i = 0; i < ifb->ntemporaries; i++) {
1917 struct ir3_instruction *a = ifb->temporaries[i];
1918 struct ir3_instruction *b = elseb->temporaries[i];
1919
1920 /* if temporary written in if-block, or if else block
1921 * is present and temporary written in else-block:
1922 */
1923 if (a || ((elseb != ifb->parent) && b)) {
1924 struct ir3_instruction *phi;
1925
1926 /* if only written on one side, find the closest
1927 * enclosing update on other side:
1928 */
1929 if (!a)
1930 a = find_temporary(ifb, i);
1931 if (!b)
1932 b = find_temporary(elseb, i);
1933
1934 ifout[ifnout] = a;
1935 a = create_output(ifb, a, ifnout++);
1936
1937 if (elseb != ifb->parent) {
1938 elseout[elsenout] = b;
1939 b = create_output(elseb, b, elsenout++);
1940 }
1941
1942 phi = create_phi(ctx, instr, a, b);
1943 ctx->block->temporaries[i] = phi;
1944 }
1945 }
1946
1947 compile_assert(ctx, ifb->noutputs == elseb->noutputs);
1948
1949 /* .. and any outputs written: */
1950 for (i = 0; i < ifb->noutputs; i++) {
1951 struct ir3_instruction *a = ifb->outputs[i];
1952 struct ir3_instruction *b = elseb->outputs[i];
1953
1954 /* if output written in if-block, or if else block
1955 * is present and output written in else-block:
1956 */
1957 if (a || ((elseb != ifb->parent) && b)) {
1958 struct ir3_instruction *phi;
1959
1960 /* if only written on one side, find the closest
1961 * enclosing update on other side:
1962 */
1963 if (!a)
1964 a = find_output(ifb, i);
1965 if (!b)
1966 b = find_output(elseb, i);
1967
1968 ifout[ifnout] = a;
1969 a = create_output(ifb, a, ifnout++);
1970
1971 if (elseb != ifb->parent) {
1972 elseout[elsenout] = b;
1973 b = create_output(elseb, b, elsenout++);
1974 }
1975
1976 phi = create_phi(ctx, instr, a, b);
1977 ctx->block->outputs[i] = phi;
1978 }
1979 }
1980
1981 ifb->noutputs = ifnout;
1982 ifb->outputs = ifout;
1983
1984 if (elseb != ifb->parent) {
1985 elseb->noutputs = elsenout;
1986 elseb->outputs = elseout;
1987 }
1988
1989 // TODO maybe we want to compact block->inputs?
1990 }
1991
1992 /*
1993 * Kill
1994 */
1995
/* KILL: the kill instruction consumes a p0 predicate, so synthesize
 * one: inside an if/else, reuse the enclosing branch condition (saved
 * by trans_if() via push_branch()); at top level, use a constant 1.0
 * so the kill always fires.
 */
static void
trans_kill(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr, *immed, *cond = NULL;
	bool inv = false;

	switch (t->tgsi_opc) {
	case TGSI_OPCODE_KILL:
		/* unconditional kill, use enclosing if condition: */
		if (ctx->branch_count > 0) {
			unsigned int idx = ctx->branch_count - 1;
			cond = ctx->branch[idx].cond;
			inv = ctx->branch[idx].inv;
		} else {
			cond = create_immed(ctx, 1.0);
		}

		break;
	}

	compile_assert(ctx, cond);

	immed = create_immed(ctx, 0.0);

	/* cmps.f.ne p0.x, cond, {0.0} */
	instr = instr_create(ctx, 2, OPC_CMPS_F);
	instr->cat2.condition = IR3_COND_NE;
	ir3_reg_create(instr, regid(REG_P0, 0), 0);
	ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
	ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = immed;
	cond = instr;

	/* kill p0.x -- inverted when we are on the else side of the branch: */
	instr = instr_create(ctx, 0, OPC_KILL);
	instr->cat0.inv = inv;
	ir3_reg_create(instr, 0, 0); /* dummy dst */
	ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;

	/* record the kill for later passes (see ctx->kill[] users): */
	ctx->kill[ctx->kill_count++] = instr;
}
2038
2039 /*
2040 * Kill-If
2041 */
2042
/* KILL_IF: kill if the src condition is non-zero.  Unlike trans_kill(),
 * the predicate comes directly from the instruction's src operand.
 */
static void
trans_killif(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct tgsi_src_register *src = &inst->Src[0].Register;
	struct ir3_instruction *instr, *immed, *cond = NULL;
	bool inv = false;

	immed = create_immed(ctx, 0.0);

	/* cmps.f.ne p0.x, cond, {0.0} */
	instr = instr_create(ctx, 2, OPC_CMPS_F);
	instr->cat2.condition = IR3_COND_NE;
	ir3_reg_create(instr, regid(REG_P0, 0), 0);
	ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = immed;
	add_src_reg(ctx, instr, src, src->SwizzleX);

	cond = instr;

	/* kill p0.x */
	instr = instr_create(ctx, 0, OPC_KILL);
	instr->cat0.inv = inv;
	ir3_reg_create(instr, 0, 0); /* dummy dst */
	ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;

	/* record the kill for later passes (see ctx->kill[] users): */
	ctx->kill[ctx->kill_count++] = instr;

}
2072 /*
2073 * I2F / U2F / F2I / F2U
2074 */
2075
2076 static void
2077 trans_cov(const struct instr_translater *t,
2078 struct ir3_compile_context *ctx,
2079 struct tgsi_full_instruction *inst)
2080 {
2081 struct ir3_instruction *instr;
2082 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2083 struct tgsi_src_register *src = &inst->Src[0].Register;
2084
2085 // cov.f32s32 dst, tmp0 /
2086 instr = instr_create(ctx, 1, 0);
2087 switch (t->tgsi_opc) {
2088 case TGSI_OPCODE_U2F:
2089 instr->cat1.src_type = TYPE_U32;
2090 instr->cat1.dst_type = TYPE_F32;
2091 break;
2092 case TGSI_OPCODE_I2F:
2093 instr->cat1.src_type = TYPE_S32;
2094 instr->cat1.dst_type = TYPE_F32;
2095 break;
2096 case TGSI_OPCODE_F2U:
2097 instr->cat1.src_type = TYPE_F32;
2098 instr->cat1.dst_type = TYPE_U32;
2099 break;
2100 case TGSI_OPCODE_F2I:
2101 instr->cat1.src_type = TYPE_F32;
2102 instr->cat1.dst_type = TYPE_S32;
2103 break;
2104
2105 }
2106 vectorize(ctx, instr, dst, 1, src, 0);
2107 put_dst(ctx, inst, dst);
2108 }
2109
2110 /*
2111 * UMUL / UMAD
2112 *
2113 * There is no 32-bit multiply instruction, so splitting a and b into high and
2114 * low components, we get that
2115 *
2116 * dst = al * bl + ah * bl << 16 + al * bh << 16
2117 *
2118 * mull.u tmp0, a, b (mul low, i.e. al * bl)
2119 * madsh.m16 tmp1, a, b, tmp0 (mul-add shift high mix, i.e. ah * bl << 16)
2120 * madsh.m16 dst, b, a, tmp1 (i.e. al * bh << 16)
2121 *
2122 * For UMAD, replace first mull.u with mad.u16.
2123 */
/* UMUL / UMAD: 32-bit multiply (optionally plus addend) synthesized
 * from 16-bit multiply pieces -- see the expansion in the comment
 * block above.
 */
static void
trans_umul(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *a = &inst->Src[0].Register;
	struct tgsi_src_register *b = &inst->Src[1].Register;

	struct tgsi_dst_register tmp0_dst, tmp1_dst;
	struct tgsi_src_register *tmp0_src, *tmp1_src;

	tmp0_src = get_internal_temp(ctx, &tmp0_dst);
	tmp1_src = get_internal_temp(ctx, &tmp1_dst);

	/* srcs are reused across several cat2/cat3 instructions, so must
	 * not be const/relative:
	 */
	if (is_rel_or_const(a))
		a = get_unconst(ctx, a);
	if (is_rel_or_const(b))
		b = get_unconst(ctx, b);

	if (t->tgsi_opc == TGSI_OPCODE_UMUL) {
		/* mull.u tmp0, a, b */
		instr = instr_create(ctx, 2, OPC_MULL_U);
		vectorize(ctx, instr, &tmp0_dst, 2, a, 0, b, 0);
	} else {
		struct tgsi_src_register *c = &inst->Src[2].Register;

		/* mad.u16 tmp0, a, b, c */
		instr = instr_create(ctx, 3, OPC_MAD_U16);
		vectorize(ctx, instr, &tmp0_dst, 3, a, 0, b, 0, c, 0);
	}

	/* madsh.m16 tmp1, a, b, tmp0 */
	instr = instr_create(ctx, 3, OPC_MADSH_M16);
	vectorize(ctx, instr, &tmp1_dst, 3, a, 0, b, 0, tmp0_src, 0);

	/* madsh.m16 dst, b, a, tmp1 */
	instr = instr_create(ctx, 3, OPC_MADSH_M16);
	vectorize(ctx, instr, dst, 3, b, 0, a, 0, tmp1_src, 0);
	put_dst(ctx, inst, dst);
}
2166
2167 /*
2168 * IDIV / UDIV / MOD / UMOD
2169 *
2170 * See NV50LegalizeSSA::handleDIV for the origin of this implementation. For
2171 * MOD/UMOD, it becomes a - [IU]DIV(a, modulus) * modulus.
2172 */
/* Integer division via float reciprocal: q = trunc(|a| * rcp(|b|)),
 * followed by two fixup rounds (rcp is only approximate), then sign
 * patch-up for signed IDIV, and a final a - q*b for MOD/UMOD.
 */
static void
trans_idiv(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = get_dst(ctx, inst), *premod_dst = dst;
	struct tgsi_src_register *a = &inst->Src[0].Register;
	struct tgsi_src_register *b = &inst->Src[1].Register;

	/* af/bf: float copies of |a|/|b|; q: quotient; r: scratch/remainder;
	 * a/b (temps): integer copies of |a|/|b|:
	 */
	struct tgsi_dst_register af_dst, bf_dst, q_dst, r_dst, a_dst, b_dst;
	struct tgsi_src_register *af_src, *bf_src, *q_src, *r_src, *a_src, *b_src;

	struct tgsi_src_register negative_2, thirty_one;
	type_t src_type;

	/* IDIV/MOD operate on signed ints, UDIV/UMOD on unsigned: */
	if (t->tgsi_opc == TGSI_OPCODE_IDIV || t->tgsi_opc == TGSI_OPCODE_MOD)
		src_type = get_stype(ctx);
	else
		src_type = get_utype(ctx);

	af_src = get_internal_temp(ctx, &af_dst);
	bf_src = get_internal_temp(ctx, &bf_dst);
	q_src = get_internal_temp(ctx, &q_dst);
	r_src = get_internal_temp(ctx, &r_dst);
	a_src = get_internal_temp(ctx, &a_dst);
	b_src = get_internal_temp(ctx, &b_dst);

	get_immediate(ctx, &negative_2, -2);
	get_immediate(ctx, &thirty_one, 31);

	/* for MOD/UMOD the quotient is only an intermediate, so redirect
	 * the division result into q instead of the real dst:
	 */
	if (t->tgsi_opc == TGSI_OPCODE_MOD || t->tgsi_opc == TGSI_OPCODE_UMOD)
		premod_dst = &q_dst;

	/* cov.[us]32f32 af, numerator */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = src_type;
	instr->cat1.dst_type = get_ftype(ctx);
	vectorize(ctx, instr, &af_dst, 1, a, 0);

	/* cov.[us]32f32 bf, denominator */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = src_type;
	instr->cat1.dst_type = get_ftype(ctx);
	vectorize(ctx, instr, &bf_dst, 1, b, 0);

	/* Get the absolute values for IDIV */
	if (type_sint(src_type)) {
		/* absneg.f af, (abs)af */
		instr = instr_create(ctx, 2, OPC_ABSNEG_F);
		vectorize(ctx, instr, &af_dst, 1, af_src, IR3_REG_ABS);

		/* absneg.f bf, (abs)bf */
		instr = instr_create(ctx, 2, OPC_ABSNEG_F);
		vectorize(ctx, instr, &bf_dst, 1, bf_src, IR3_REG_ABS);

		/* absneg.s a, (abs)numerator */
		instr = instr_create(ctx, 2, OPC_ABSNEG_S);
		vectorize(ctx, instr, &a_dst, 1, a, IR3_REG_ABS);

		/* absneg.s b, (abs)denominator */
		instr = instr_create(ctx, 2, OPC_ABSNEG_S);
		vectorize(ctx, instr, &b_dst, 1, b, IR3_REG_ABS);
	} else {
		/* unsigned case: plain copies into the integer temps */
		/* mov.u32u32 a, numerator */
		instr = instr_create(ctx, 1, 0);
		instr->cat1.src_type = src_type;
		instr->cat1.dst_type = src_type;
		vectorize(ctx, instr, &a_dst, 1, a, 0);

		/* mov.u32u32 b, denominator */
		instr = instr_create(ctx, 1, 0);
		instr->cat1.src_type = src_type;
		instr->cat1.dst_type = src_type;
		vectorize(ctx, instr, &b_dst, 1, b, 0);
	}

	/* rcp.f bf, bf */
	instr = instr_create(ctx, 4, OPC_RCP);
	vectorize(ctx, instr, &bf_dst, 1, bf_src, 0);

	/* That's right, subtract 2 as an integer from the float */
	/* add.u bf, bf, -2 */
	/* (nudges the reciprocal's mantissa down so q underestimates,
	 * which the correction rounds below then fix upward)
	 */
	instr = instr_create(ctx, 2, OPC_ADD_U);
	vectorize(ctx, instr, &bf_dst, 2, bf_src, 0, &negative_2, 0);

	/* mul.f q, af, bf */
	instr = instr_create(ctx, 2, OPC_MUL_F);
	vectorize(ctx, instr, &q_dst, 2, af_src, 0, bf_src, 0);

	/* cov.f32[us]32 q, q */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = get_ftype(ctx);
	instr->cat1.dst_type = src_type;
	vectorize(ctx, instr, &q_dst, 1, q_src, 0);

	/* first fixup round: compute r = a - q*b using the 32b multiply
	 * sequence (see trans_umul), scale by rcp(b), and add the result
	 * back into q:
	 */
	/* integer multiply q by b */
	/* mull.u r, q, b */
	instr = instr_create(ctx, 2, OPC_MULL_U);
	vectorize(ctx, instr, &r_dst, 2, q_src, 0, b_src, 0);

	/* madsh.m16 r, q, b, r */
	instr = instr_create(ctx, 3, OPC_MADSH_M16);
	vectorize(ctx, instr, &r_dst, 3, q_src, 0, b_src, 0, r_src, 0);

	/* madsh.m16, r, b, q, r */
	instr = instr_create(ctx, 3, OPC_MADSH_M16);
	vectorize(ctx, instr, &r_dst, 3, b_src, 0, q_src, 0, r_src, 0);

	/* sub.u r, a, r */
	instr = instr_create(ctx, 2, OPC_SUB_U);
	vectorize(ctx, instr, &r_dst, 2, a_src, 0, r_src, 0);

	/* cov.u32f32, r, r */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = get_utype(ctx);
	instr->cat1.dst_type = get_ftype(ctx);
	vectorize(ctx, instr, &r_dst, 1, r_src, 0);

	/* mul.f r, r, bf */
	instr = instr_create(ctx, 2, OPC_MUL_F);
	vectorize(ctx, instr, &r_dst, 2, r_src, 0, bf_src, 0);

	/* cov.f32u32 r, r */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = get_ftype(ctx);
	instr->cat1.dst_type = get_utype(ctx);
	vectorize(ctx, instr, &r_dst, 1, r_src, 0);

	/* add.u q, q, r */
	instr = instr_create(ctx, 2, OPC_ADD_U);
	vectorize(ctx, instr, &q_dst, 2, q_src, 0, r_src, 0);

	/* second fixup round: recompute the remainder and bump q by one
	 * more if it is still >= b:
	 */
	/* mull.u r, q, b */
	instr = instr_create(ctx, 2, OPC_MULL_U);
	vectorize(ctx, instr, &r_dst, 2, q_src, 0, b_src, 0);

	/* madsh.m16 r, q, b, r */
	instr = instr_create(ctx, 3, OPC_MADSH_M16);
	vectorize(ctx, instr, &r_dst, 3, q_src, 0, b_src, 0, r_src, 0);

	/* madsh.m16 r, b, q, r */
	instr = instr_create(ctx, 3, OPC_MADSH_M16);
	vectorize(ctx, instr, &r_dst, 3, b_src, 0, q_src, 0, r_src, 0);

	/* sub.u r, a, r */
	instr = instr_create(ctx, 2, OPC_SUB_U);
	vectorize(ctx, instr, &r_dst, 2, a_src, 0, r_src, 0);

	/* cmps.u.ge r, r, b */
	instr = instr_create(ctx, 2, OPC_CMPS_U);
	instr->cat2.condition = IR3_COND_GE;
	vectorize(ctx, instr, &r_dst, 2, r_src, 0, b_src, 0);

	if (type_uint(src_type)) {
		/* add.u dst, q, r */
		instr = instr_create(ctx, 2, OPC_ADD_U);
		vectorize(ctx, instr, premod_dst, 2, q_src, 0, r_src, 0);
	} else {
		/* add.u q, q, r */
		instr = instr_create(ctx, 2, OPC_ADD_U);
		vectorize(ctx, instr, &q_dst, 2, q_src, 0, r_src, 0);

		/* negate result based on the original arguments */
		if (is_const(a) && is_const(b))
			a = get_unconst(ctx, a);

		/* xor.b r, numerator, denominator */
		instr = instr_create(ctx, 2, OPC_XOR_B);
		vectorize(ctx, instr, &r_dst, 2, a, 0, b, 0);

		/* shr.b r, r, 31 */
		/* (r is now nonzero iff the input signs differ) */
		instr = instr_create(ctx, 2, OPC_SHR_B);
		vectorize(ctx, instr, &r_dst, 2, r_src, 0, &thirty_one, 0);

		/* absneg.s b, (neg)q */
		instr = instr_create(ctx, 2, OPC_ABSNEG_S);
		vectorize(ctx, instr, &b_dst, 1, q_src, IR3_REG_NEGATE);

		/* sel.b dst, b, r, q  (pick -q or q based on sign mismatch) */
		instr = instr_create(ctx, 3, OPC_SEL_B32);
		vectorize(ctx, instr, premod_dst, 3, b_src, 0, r_src, 0, q_src, 0);
	}

	if (t->tgsi_opc == TGSI_OPCODE_MOD || t->tgsi_opc == TGSI_OPCODE_UMOD) {
		/* The division result will have ended up in q. */

		/* mull.u r, q, b */
		instr = instr_create(ctx, 2, OPC_MULL_U);
		vectorize(ctx, instr, &r_dst, 2, q_src, 0, b, 0);

		/* madsh.m16 r, q, b, r */
		instr = instr_create(ctx, 3, OPC_MADSH_M16);
		vectorize(ctx, instr, &r_dst, 3, q_src, 0, b, 0, r_src, 0);

		/* madsh.m16 r, b, q, r */
		instr = instr_create(ctx, 3, OPC_MADSH_M16);
		vectorize(ctx, instr, &r_dst, 3, b, 0, q_src, 0, r_src, 0);

		/* sub.u dst, a, r */
		instr = instr_create(ctx, 2, OPC_SUB_U);
		vectorize(ctx, instr, dst, 2, a, 0, r_src, 0);
	}

	put_dst(ctx, inst, dst);
}
2379
2380 /*
2381 * Handlers for TGSI instructions which do have 1:1 mapping to native
2382 * instructions:
2383 */
2384
/* cat0 (flow) opcodes with no operands (e.g. END): just emit the
 * native instruction named by the translater entry.
 */
static void
instr_cat0(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	instr_create(ctx, 0, t->opc);
}
2392
2393 static void
2394 instr_cat1(const struct instr_translater *t,
2395 struct ir3_compile_context *ctx,
2396 struct tgsi_full_instruction *inst)
2397 {
2398 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2399 struct tgsi_src_register *src = &inst->Src[0].Register;
2400 create_mov(ctx, dst, src);
2401 put_dst(ctx, inst, dst);
2402 }
2403
2404 static void
2405 instr_cat2(const struct instr_translater *t,
2406 struct ir3_compile_context *ctx,
2407 struct tgsi_full_instruction *inst)
2408 {
2409 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2410 struct tgsi_src_register *src0 = &inst->Src[0].Register;
2411 struct tgsi_src_register *src1 = &inst->Src[1].Register;
2412 struct ir3_instruction *instr;
2413 unsigned src0_flags = 0, src1_flags = 0;
2414
2415 switch (t->tgsi_opc) {
2416 case TGSI_OPCODE_ABS:
2417 case TGSI_OPCODE_IABS:
2418 src0_flags = IR3_REG_ABS;
2419 break;
2420 case TGSI_OPCODE_INEG:
2421 src0_flags = IR3_REG_NEGATE;
2422 break;
2423 case TGSI_OPCODE_SUB:
2424 src1_flags = IR3_REG_NEGATE;
2425 break;
2426 }
2427
2428 switch (t->opc) {
2429 case OPC_ABSNEG_F:
2430 case OPC_ABSNEG_S:
2431 case OPC_CLZ_B:
2432 case OPC_CLZ_S:
2433 case OPC_SIGN_F:
2434 case OPC_FLOOR_F:
2435 case OPC_CEIL_F:
2436 case OPC_RNDNE_F:
2437 case OPC_RNDAZ_F:
2438 case OPC_TRUNC_F:
2439 case OPC_NOT_B:
2440 case OPC_BFREV_B:
2441 case OPC_SETRM:
2442 case OPC_CBITS_B:
2443 /* these only have one src reg */
2444 instr = instr_create(ctx, 2, t->opc);
2445 vectorize(ctx, instr, dst, 1, src0, src0_flags);
2446 break;
2447 default:
2448 if (is_const(src0) && is_const(src1))
2449 src0 = get_unconst(ctx, src0);
2450
2451 instr = instr_create(ctx, 2, t->opc);
2452 vectorize(ctx, instr, dst, 2, src0, src0_flags,
2453 src1, src1_flags);
2454 break;
2455 }
2456
2457 put_dst(ctx, inst, dst);
2458 }
2459
2460 static void
2461 instr_cat3(const struct instr_translater *t,
2462 struct ir3_compile_context *ctx,
2463 struct tgsi_full_instruction *inst)
2464 {
2465 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2466 struct tgsi_src_register *src0 = &inst->Src[0].Register;
2467 struct tgsi_src_register *src1 = &inst->Src[1].Register;
2468 struct ir3_instruction *instr;
2469
2470 /* in particular, can't handle const for src1 for cat3..
2471 * for mad, we can swap first two src's if needed:
2472 */
2473 if (is_rel_or_const(src1)) {
2474 if (is_mad(t->opc) && !is_rel_or_const(src0)) {
2475 struct tgsi_src_register *tmp;
2476 tmp = src0;
2477 src0 = src1;
2478 src1 = tmp;
2479 } else {
2480 src1 = get_unconst(ctx, src1);
2481 }
2482 }
2483
2484 instr = instr_create(ctx, 3, t->opc);
2485 vectorize(ctx, instr, dst, 3, src0, 0, src1, 0,
2486 &inst->Src[2].Register, 0);
2487 put_dst(ctx, inst, dst);
2488 }
2489
2490 static void
2491 instr_cat4(const struct instr_translater *t,
2492 struct ir3_compile_context *ctx,
2493 struct tgsi_full_instruction *inst)
2494 {
2495 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2496 struct tgsi_src_register *src = &inst->Src[0].Register;
2497 struct ir3_instruction *instr;
2498 unsigned i;
2499
2500 /* seems like blob compiler avoids const as src.. */
2501 if (is_const(src))
2502 src = get_unconst(ctx, src);
2503
2504 /* we need to replicate into each component: */
2505 for (i = 0; i < 4; i++) {
2506 if (dst->WriteMask & (1 << i)) {
2507 instr = instr_create(ctx, 4, t->opc);
2508 add_dst_reg(ctx, instr, dst, i);
2509 add_src_reg(ctx, instr, src, src->SwizzleX);
2510 }
2511 }
2512
2513 put_dst(ctx, inst, dst);
2514 }
2515
2516 static const struct instr_translater translaters[TGSI_OPCODE_LAST] = {
2517 #define INSTR(n, f, ...) \
2518 [TGSI_OPCODE_ ## n] = { .fxn = (f), .tgsi_opc = TGSI_OPCODE_ ## n, ##__VA_ARGS__ }
2519
2520 INSTR(MOV, instr_cat1),
2521 INSTR(RCP, instr_cat4, .opc = OPC_RCP),
2522 INSTR(RSQ, instr_cat4, .opc = OPC_RSQ),
2523 INSTR(SQRT, instr_cat4, .opc = OPC_SQRT),
2524 INSTR(MUL, instr_cat2, .opc = OPC_MUL_F),
2525 INSTR(ADD, instr_cat2, .opc = OPC_ADD_F),
2526 INSTR(SUB, instr_cat2, .opc = OPC_ADD_F),
2527 INSTR(MIN, instr_cat2, .opc = OPC_MIN_F),
2528 INSTR(MAX, instr_cat2, .opc = OPC_MAX_F),
2529 INSTR(UADD, instr_cat2, .opc = OPC_ADD_U),
2530 INSTR(IMIN, instr_cat2, .opc = OPC_MIN_S),
2531 INSTR(UMIN, instr_cat2, .opc = OPC_MIN_U),
2532 INSTR(IMAX, instr_cat2, .opc = OPC_MAX_S),
2533 INSTR(UMAX, instr_cat2, .opc = OPC_MAX_U),
2534 INSTR(AND, instr_cat2, .opc = OPC_AND_B),
2535 INSTR(OR, instr_cat2, .opc = OPC_OR_B),
2536 INSTR(NOT, instr_cat2, .opc = OPC_NOT_B),
2537 INSTR(XOR, instr_cat2, .opc = OPC_XOR_B),
2538 INSTR(UMUL, trans_umul),
2539 INSTR(UMAD, trans_umul),
2540 INSTR(UDIV, trans_idiv),
2541 INSTR(IDIV, trans_idiv),
2542 INSTR(MOD, trans_idiv),
2543 INSTR(UMOD, trans_idiv),
2544 INSTR(SHL, instr_cat2, .opc = OPC_SHL_B),
2545 INSTR(USHR, instr_cat2, .opc = OPC_SHR_B),
2546 INSTR(ISHR, instr_cat2, .opc = OPC_ASHR_B),
2547 INSTR(IABS, instr_cat2, .opc = OPC_ABSNEG_S),
2548 INSTR(INEG, instr_cat2, .opc = OPC_ABSNEG_S),
2549 INSTR(AND, instr_cat2, .opc = OPC_AND_B),
2550 INSTR(MAD, instr_cat3, .opc = OPC_MAD_F32, .hopc = OPC_MAD_F16),
2551 INSTR(TRUNC, instr_cat2, .opc = OPC_TRUNC_F),
2552 INSTR(CLAMP, trans_clamp),
2553 INSTR(FLR, instr_cat2, .opc = OPC_FLOOR_F),
2554 INSTR(ROUND, instr_cat2, .opc = OPC_RNDNE_F),
2555 INSTR(SSG, instr_cat2, .opc = OPC_SIGN_F),
2556 INSTR(CEIL, instr_cat2, .opc = OPC_CEIL_F),
2557 INSTR(ARL, trans_arl),
2558 INSTR(UARL, trans_arl),
2559 INSTR(EX2, instr_cat4, .opc = OPC_EXP2),
2560 INSTR(LG2, instr_cat4, .opc = OPC_LOG2),
2561 INSTR(ABS, instr_cat2, .opc = OPC_ABSNEG_F),
2562 INSTR(COS, instr_cat4, .opc = OPC_COS),
2563 INSTR(SIN, instr_cat4, .opc = OPC_SIN),
2564 INSTR(TEX, trans_samp, .opc = OPC_SAM, .arg = TGSI_OPCODE_TEX),
2565 INSTR(TXP, trans_samp, .opc = OPC_SAM, .arg = TGSI_OPCODE_TXP),
2566 INSTR(TXB, trans_samp, .opc = OPC_SAMB, .arg = TGSI_OPCODE_TXB),
2567 INSTR(TXB2, trans_samp, .opc = OPC_SAMB, .arg = TGSI_OPCODE_TXB2),
2568 INSTR(TXL, trans_samp, .opc = OPC_SAML, .arg = TGSI_OPCODE_TXL),
2569 INSTR(TXD, trans_samp, .opc = OPC_SAMGQ, .arg = TGSI_OPCODE_TXD),
2570 INSTR(TXF, trans_samp, .opc = OPC_ISAML, .arg = TGSI_OPCODE_TXF),
2571 INSTR(TXQ, trans_txq),
2572 INSTR(DDX, trans_deriv, .opc = OPC_DSX),
2573 INSTR(DDY, trans_deriv, .opc = OPC_DSY),
2574 INSTR(SGT, trans_cmp),
2575 INSTR(SLT, trans_cmp),
2576 INSTR(FSLT, trans_cmp),
2577 INSTR(SGE, trans_cmp),
2578 INSTR(FSGE, trans_cmp),
2579 INSTR(SLE, trans_cmp),
2580 INSTR(SNE, trans_cmp),
2581 INSTR(FSNE, trans_cmp),
2582 INSTR(SEQ, trans_cmp),
2583 INSTR(FSEQ, trans_cmp),
2584 INSTR(CMP, trans_cmp),
2585 INSTR(USNE, trans_icmp, .opc = OPC_CMPS_U),
2586 INSTR(USEQ, trans_icmp, .opc = OPC_CMPS_U),
2587 INSTR(ISGE, trans_icmp, .opc = OPC_CMPS_S),
2588 INSTR(USGE, trans_icmp, .opc = OPC_CMPS_U),
2589 INSTR(ISLT, trans_icmp, .opc = OPC_CMPS_S),
2590 INSTR(USLT, trans_icmp, .opc = OPC_CMPS_U),
2591 INSTR(UCMP, trans_ucmp),
2592 INSTR(ISSG, trans_issg),
2593 INSTR(IF, trans_if, .opc = OPC_CMPS_F),
2594 INSTR(UIF, trans_if, .opc = OPC_CMPS_U),
2595 INSTR(ELSE, trans_else),
2596 INSTR(ENDIF, trans_endif),
2597 INSTR(END, instr_cat0, .opc = OPC_END),
2598 INSTR(KILL, trans_kill, .opc = OPC_KILL),
2599 INSTR(KILL_IF, trans_killif, .opc = OPC_KILL),
2600 INSTR(I2F, trans_cov),
2601 INSTR(U2F, trans_cov),
2602 INSTR(F2I, trans_cov),
2603 INSTR(F2U, trans_cov),
2604 };
2605
/* pack a TGSI semantic name+index pair into the ir3 semantic value: */
static ir3_semantic
decl_semantic(const struct tgsi_declaration_semantic *sem)
{
	return ir3_semantic_name(sem->Name, sem->Index);
}
2611
/* create the bary.f instruction which interpolates one scalar frag
 * shader input component at varying location 'inloc', relative to
 * the frag_pos (r0.xy) base:
 */
static struct ir3_instruction *
decl_in_frag_bary(struct ir3_compile_context *ctx, unsigned regid,
		unsigned j, unsigned inloc)
{
	struct ir3_instruction *instr;
	struct ir3_register *src;

	/* bary.f dst, #inloc, r0.x */
	instr = instr_create(ctx, 2, OPC_BARY_F);
	ir3_reg_create(instr, regid, 0); /* dummy dst */
	ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = inloc;
	src = ir3_reg_create(instr, 0, IR3_REG_SSA);
	src->wrmask = 0x3;       /* reads both components of frag_pos (r0.xy) */
	src->instr = ctx->frag_pos;

	return instr;
}
2629
2630 /* TGSI_SEMANTIC_POSITION
2631 * """"""""""""""""""""""
2632 *
2633 * For fragment shaders, TGSI_SEMANTIC_POSITION is used to indicate that
2634 * fragment shader input contains the fragment's window position. The X
2635 * component starts at zero and always increases from left to right.
2636 * The Y component starts at zero and always increases but Y=0 may either
2637 * indicate the top of the window or the bottom depending on the fragment
2638 * coordinate origin convention (see TGSI_PROPERTY_FS_COORD_ORIGIN).
2639 * The Z coordinate ranges from 0 to 1 to represent depth from the front
2640 * to the back of the Z buffer. The W component contains the reciprocol
2641 * of the interpolated vertex position W component.
2642 */
/* emit the input + conversion sequence for one component (j) of the
 * fragment coordinate; returns the instruction producing the final
 * float value for that component:
 */
static struct ir3_instruction *
decl_in_frag_coord(struct ir3_compile_context *ctx, unsigned regid,
		unsigned j)
{
	struct ir3_instruction *instr, *src;

	compile_assert(ctx, !ctx->frag_coord[j]);

	ctx->frag_coord[j] = create_input(ctx->block, NULL, 0);


	switch (j) {
	case 0: /* .x */
	case 1: /* .y */
		/* for frag_coord, we get unsigned values.. we need
		 * to subtract (integer) 8 and divide by 16 (right-
		 * shift by 4) then convert to float:
		 */

		/* add.s tmp, src, -8 */
		instr = instr_create(ctx, 2, OPC_ADD_S);
		ir3_reg_create(instr, regid, 0); /* dummy dst */
		ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->frag_coord[j];
		ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = -8;
		src = instr;

		/* shr.b tmp, tmp, 4 */
		instr = instr_create(ctx, 2, OPC_SHR_B);
		ir3_reg_create(instr, regid, 0); /* dummy dst */
		ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
		ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 4;
		src = instr;

		/* mov.u32f32 dst, tmp */
		instr = instr_create(ctx, 1, 0);
		instr->cat1.src_type = TYPE_U32;
		instr->cat1.dst_type = TYPE_F32;
		ir3_reg_create(instr, regid, 0); /* dummy dst */
		ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;

		break;
	case 2: /* .z */
	case 3: /* .w */
		/* seems that we can use these as-is: */
		instr = ctx->frag_coord[j];
		break;
	default:
		compile_error(ctx, "invalid channel\n");
		instr = create_immed(ctx, 0.0);
		break;
	}

	return instr;
}
2697
2698 /* TGSI_SEMANTIC_FACE
2699 * """"""""""""""""""
2700 *
2701 * This label applies to fragment shader inputs only and indicates that
2702 * the register contains front/back-face information of the form (F, 0,
2703 * 0, 1). The first component will be positive when the fragment belongs
2704 * to a front-facing polygon, and negative when the fragment belongs to a
2705 * back-facing polygon.
2706 */
/* emit the input + conversion for one component (j) of the faceness
 * input; .x becomes +/-1.0, .y/.z/.w are the constant (F, 0, 0, 1)
 * tail:
 */
static struct ir3_instruction *
decl_in_frag_face(struct ir3_compile_context *ctx, unsigned regid,
		unsigned j)
{
	struct ir3_instruction *instr, *src;

	switch (j) {
	case 0: /* .x */
		compile_assert(ctx, !ctx->frag_face);

		ctx->frag_face = create_input(ctx->block, NULL, 0);

		/* for faceness, we always get -1 or 0 (int).. but TGSI expects
		 * positive vs negative float.. and piglit further seems to
		 * expect -1.0 or 1.0:
		 *
		 *    mul.s tmp, hr0.x, 2
		 *    add.s tmp, tmp, 1
		 *    mov.s16f32, dst, tmp
		 *
		 * (maps -1 -> -1 and 0 -> 1)
		 */

		instr = instr_create(ctx, 2, OPC_MUL_S);
		ir3_reg_create(instr, regid, 0); /* dummy dst */
		ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->frag_face;
		ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 2;
		src = instr;

		instr = instr_create(ctx, 2, OPC_ADD_S);
		ir3_reg_create(instr, regid, 0); /* dummy dst */
		ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
		ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 1;
		src = instr;

		instr = instr_create(ctx, 1, 0); /* mov */
		instr->cat1.src_type = TYPE_S32;
		instr->cat1.dst_type = TYPE_F32;
		ir3_reg_create(instr, regid, 0); /* dummy dst */
		ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;

		break;
	case 1: /* .y */
	case 2: /* .z */
		instr = create_immed(ctx, 0.0);
		break;
	case 3: /* .w */
		instr = create_immed(ctx, 1.0);
		break;
	default:
		compile_error(ctx, "invalid channel\n");
		instr = create_immed(ctx, 0.0);
		break;
	}

	return instr;
}
2763
/* handle a TGSI input declaration: record each declared register in
 * the variant's input table and create the corresponding producing
 * instructions (bary.f fetch, frag_coord/frag_face special cases, or
 * plain hw input for vertex shaders):
 */
static void
decl_in(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned name = decl->Semantic.Name;
	unsigned i;

	/* I don't think we should get frag shader input without
	 * semantic info?  Otherwise how do inputs get linked to
	 * vert outputs?
	 */
	compile_assert(ctx, (ctx->type == TGSI_PROCESSOR_VERTEX) ||
			decl->Declaration.Semantic);

	for (i = decl->Range.First; i <= decl->Range.Last; i++) {
		unsigned n = so->inputs_count++;
		unsigned r = regid(i, 0);
		unsigned ncomp, j;

		/* we'll figure out the actual components used after scheduling */
		ncomp = 4;

		DBG("decl in -> r%d", i);

		compile_assert(ctx, n < ARRAY_SIZE(so->inputs));

		so->inputs[n].semantic = decl_semantic(&decl->Semantic);
		so->inputs[n].compmask = (1 << ncomp) - 1;
		so->inputs[n].regid = r;
		so->inputs[n].inloc = ctx->next_inloc;
		so->inputs[n].interpolate = decl->Interp.Interpolate;

		for (j = 0; j < ncomp; j++) {
			struct ir3_instruction *instr = NULL;

			if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
				/* for fragment shaders, POSITION and FACE are handled
				 * specially, not using normal varying / bary.f
				 */
				if (name == TGSI_SEMANTIC_POSITION) {
					so->inputs[n].bary = false;
					so->frag_coord = true;
					instr = decl_in_frag_coord(ctx, r + j, j);
				} else if (name == TGSI_SEMANTIC_FACE) {
					so->inputs[n].bary = false;
					so->frag_face = true;
					instr = decl_in_frag_face(ctx, r + j, j);
				} else {
					so->inputs[n].bary = true;
					/* NOTE(review): the "- 8" appears to rebase inloc past
					 * some reserved locations -- confirm against where
					 * next_inloc is initialized.
					 */
					instr = decl_in_frag_bary(ctx, r + j, j,
							so->inputs[n].inloc + j - 8);
				}
			} else {
				instr = create_input(ctx->block, NULL, (i * 4) + j);
			}

			ctx->block->inputs[(i * 4) + j] = instr;
		}

		/* only bary.f varyings (and all VS inputs) consume inloc space: */
		if (so->inputs[n].bary || (ctx->type == TGSI_PROCESSOR_VERTEX)) {
			ctx->next_inloc += ncomp;
			so->total_in += ncomp;
		}
	}
}
2829
/* handle a TGSI output declaration: validate the semantic against the
 * shader stage, record it in the variant's output table, and seed each
 * component with a dummy 0.0 immediate so no output is left undefined:
 */
static void
decl_out(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned comp = 0;
	unsigned name = decl->Semantic.Name;
	unsigned i;

	compile_assert(ctx, decl->Declaration.Semantic);

	DBG("decl out[%d] -> r%d", name, decl->Range.First);

	if (ctx->type == TGSI_PROCESSOR_VERTEX) {
		switch (name) {
		case TGSI_SEMANTIC_POSITION:
			so->writes_pos = true;
			break;
		case TGSI_SEMANTIC_PSIZE:
			so->writes_psize = true;
			break;
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_GENERIC:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
			break;
		default:
			compile_error(ctx, "unknown VS semantic name: %s\n",
					tgsi_semantic_names[name]);
		}
	} else {
		switch (name) {
		case TGSI_SEMANTIC_POSITION:
			comp = 2;  /* tgsi will write to .z component */
			so->writes_pos = true;
			break;
		case TGSI_SEMANTIC_COLOR:
			break;
		default:
			compile_error(ctx, "unknown FS semantic name: %s\n",
					tgsi_semantic_names[name]);
		}
	}

	for (i = decl->Range.First; i <= decl->Range.Last; i++) {
		unsigned n = so->outputs_count++;
		unsigned ncomp, j;

		ncomp = 4;

		compile_assert(ctx, n < ARRAY_SIZE(so->outputs));

		so->outputs[n].semantic = decl_semantic(&decl->Semantic);
		so->outputs[n].regid = regid(i, comp);

		/* avoid undefined outputs, stick a dummy mov from imm{0.0},
		 * which if the output is actually assigned will be over-
		 * written
		 */
		for (j = 0; j < ncomp; j++)
			ctx->block->outputs[(i * 4) + j] = create_immed(ctx, 0.0);
	}
}
2893
2894 /* from TGSI perspective, we actually have inputs. But most of the "inputs"
2895 * for a fragment shader are just bary.f instructions. The *actual* inputs
2896 * from the hw perspective are the frag_pos and optionally frag_coord and
2897 * frag_face.
2898 */
static void
fixup_frag_inputs(struct ir3_compile_context *ctx)
{
	struct ir3_shader_variant *so = ctx->so;
	struct ir3_block *block = ctx->block;
	struct ir3_instruction **inputs;
	struct ir3_instruction *instr;
	int n, regid = 0;

	/* rebuild the block's input list from scratch, in hw order:
	 * [frag_face vec4] [frag_coord vec4] frag_pos.xy
	 */
	block->ninputs = 0;

	n  = 4;  /* always have frag_pos */
	n += COND(so->frag_face, 4);
	n += COND(so->frag_coord, 4);

	inputs = ir3_alloc(ctx->ir, n * (sizeof(struct ir3_instruction *)));

	if (so->frag_face) {
		/* this ultimately gets assigned to hr0.x so doesn't conflict
		 * with frag_coord/frag_pos..
		 */
		inputs[block->ninputs++] = ctx->frag_face;
		ctx->frag_face->regs[0]->num = 0;

		/* remaining channels not used, but let's avoid confusing
		 * other parts that expect inputs to come in groups of vec4
		 */
		inputs[block->ninputs++] = NULL;
		inputs[block->ninputs++] = NULL;
		inputs[block->ninputs++] = NULL;
	}

	/* since we don't know where to set the regid for frag_coord,
	 * we have to use r0.x for it.  But we don't want to *always*
	 * use r1.x for frag_pos as that could increase the register
	 * footprint on simple shaders:
	 */
	if (so->frag_coord) {
		ctx->frag_coord[0]->regs[0]->num = regid++;
		ctx->frag_coord[1]->regs[0]->num = regid++;
		ctx->frag_coord[2]->regs[0]->num = regid++;
		ctx->frag_coord[3]->regs[0]->num = regid++;

		inputs[block->ninputs++] = ctx->frag_coord[0];
		inputs[block->ninputs++] = ctx->frag_coord[1];
		inputs[block->ninputs++] = ctx->frag_coord[2];
		inputs[block->ninputs++] = ctx->frag_coord[3];
	}

	/* we always have frag_pos: */
	so->pos_regid = regid;

	/* r0.x */
	instr = create_input(block, NULL, block->ninputs);
	instr->regs[0]->num = regid++;
	inputs[block->ninputs++] = instr;
	ctx->frag_pos->regs[1]->instr = instr;

	/* r0.y */
	instr = create_input(block, NULL, block->ninputs);
	instr->regs[0]->num = regid++;
	inputs[block->ninputs++] = instr;
	ctx->frag_pos->regs[2]->instr = instr;

	block->inputs = inputs;
}
2965
/* main token loop: walk the TGSI token stream, handling declarations,
 * immediates, and instructions (dispatched via the translaters table):
 */
static void
compile_instructions(struct ir3_compile_context *ctx)
{
	push_block(ctx);

	/* for fragment shader, we have a single input register (usually
	 * r0.xy) which is used as the base for bary.f varying fetch instrs:
	 */
	if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
		struct ir3_instruction *instr;
		instr = ir3_instr_create(ctx->block, -1, OPC_META_FI);
		ir3_reg_create(instr, 0, 0);
		ir3_reg_create(instr, 0, IR3_REG_SSA);    /* r0.x */
		ir3_reg_create(instr, 0, IR3_REG_SSA);    /* r0.y */
		ctx->frag_pos = instr;
	}

	while (!tgsi_parse_end_of_tokens(&ctx->parser)) {
		tgsi_parse_token(&ctx->parser);

		switch (ctx->parser.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_DECLARATION: {
			struct tgsi_full_declaration *decl =
					&ctx->parser.FullToken.FullDeclaration;
			if (decl->Declaration.File == TGSI_FILE_OUTPUT) {
				decl_out(ctx, decl);
			} else if (decl->Declaration.File == TGSI_FILE_INPUT) {
				decl_in(ctx, decl);
			}
			break;
		}
		case TGSI_TOKEN_TYPE_IMMEDIATE: {
			/* TODO: if we know the immediate is small enough, and only
			 * used with instructions that can embed an immediate, we
			 * can skip this:
			 */
			struct tgsi_full_immediate *imm =
					&ctx->parser.FullToken.FullImmediate;
			unsigned n = ctx->so->immediates_count++;
			compile_assert(ctx, n < ARRAY_SIZE(ctx->so->immediates));
			memcpy(ctx->so->immediates[n].val, imm->u, 16);
			break;
		}
		case TGSI_TOKEN_TYPE_INSTRUCTION: {
			struct tgsi_full_instruction *inst =
					&ctx->parser.FullToken.FullInstruction;
			unsigned opc = inst->Instruction.Opcode;
			const struct instr_translater *t = &translaters[opc];

			if (t->fxn) {
				t->fxn(t, ctx, inst);
				/* internal temps are per-instruction scratch: */
				ctx->num_internal_temps = 0;

				compile_assert(ctx, !ctx->using_tmp_dst);
			} else {
				compile_error(ctx, "unknown TGSI opc: %s\n",
						tgsi_get_opcode_name(opc));
			}

			/* apply any TGSI saturate modifier as an extra clamp: */
			switch (inst->Instruction.Saturate) {
			case TGSI_SAT_ZERO_ONE:
				create_clamp_imm(ctx, &inst->Dst[0].Register,
						fui(0.0), fui(1.0));
				break;
			case TGSI_SAT_MINUS_PLUS_ONE:
				create_clamp_imm(ctx, &inst->Dst[0].Register,
						fui(-1.0), fui(1.0));
				break;
			}

			instr_finish(ctx);

			break;
		}
		default:
			break;
		}
	}
}
3045
3046 static void
3047 compile_dump(struct ir3_compile_context *ctx)
3048 {
3049 const char *name = (ctx->so->type == SHADER_VERTEX) ? "vert" : "frag";
3050 static unsigned n = 0;
3051 char fname[16];
3052 FILE *f;
3053 snprintf(fname, sizeof(fname), "%s-%04u.dot", name, n++);
3054 f = fopen(fname, "w");
3055 if (!f)
3056 return;
3057 ir3_block_depth(ctx->block);
3058 ir3_dump(ctx->ir, name, ctx->block, f);
3059 fclose(f);
3060 }
3061
3062 int
3063 ir3_compile_shader(struct ir3_shader_variant *so,
3064 const struct tgsi_token *tokens, struct ir3_shader_key key,
3065 bool cp)
3066 {
3067 struct ir3_compile_context ctx;
3068 struct ir3_block *block;
3069 struct ir3_instruction **inputs;
3070 unsigned i, j, actual_in;
3071 int ret = 0, max_bary;
3072
3073 assert(!so->ir);
3074
3075 so->ir = ir3_create();
3076
3077 assert(so->ir);
3078
3079 if (compile_init(&ctx, so, tokens) != TGSI_PARSE_OK) {
3080 DBG("INIT failed!");
3081 ret = -1;
3082 goto out;
3083 }
3084
3085 compile_instructions(&ctx);
3086
3087 block = ctx.block;
3088
3089 /* keep track of the inputs from TGSI perspective.. */
3090 inputs = block->inputs;
3091
3092 /* but fixup actual inputs for frag shader: */
3093 if (ctx.type == TGSI_PROCESSOR_FRAGMENT)
3094 fixup_frag_inputs(&ctx);
3095
3096 /* at this point, for binning pass, throw away unneeded outputs: */
3097 if (key.binning_pass) {
3098 for (i = 0, j = 0; i < so->outputs_count; i++) {
3099 unsigned name = sem2name(so->outputs[i].semantic);
3100 unsigned idx = sem2name(so->outputs[i].semantic);
3101
3102 /* throw away everything but first position/psize */
3103 if ((idx == 0) && ((name == TGSI_SEMANTIC_POSITION) ||
3104 (name == TGSI_SEMANTIC_PSIZE))) {
3105 if (i != j) {
3106 so->outputs[j] = so->outputs[i];
3107 block->outputs[(j*4)+0] = block->outputs[(i*4)+0];
3108 block->outputs[(j*4)+1] = block->outputs[(i*4)+1];
3109 block->outputs[(j*4)+2] = block->outputs[(i*4)+2];
3110 block->outputs[(j*4)+3] = block->outputs[(i*4)+3];
3111 }
3112 j++;
3113 }
3114 }
3115 so->outputs_count = j;
3116 block->noutputs = j * 4;
3117 }
3118
3119 /* for rendering to alpha format, we only need the .w component,
3120 * and we need it to be in the .x position:
3121 */
3122 if (key.alpha) {
3123 for (i = 0, j = 0; i < so->outputs_count; i++) {
3124 unsigned name = sem2name(so->outputs[i].semantic);
3125
3126 /* move .w component to .x and discard others: */
3127 if (name == TGSI_SEMANTIC_COLOR) {
3128 block->outputs[(i*4)+0] = block->outputs[(i*4)+3];
3129 block->outputs[(i*4)+1] = NULL;
3130 block->outputs[(i*4)+2] = NULL;
3131 block->outputs[(i*4)+3] = NULL;
3132 }
3133 }
3134 }
3135
3136 /* at this point, we want the kill's in the outputs array too,
3137 * so that they get scheduled (since they have no dst).. we've
3138 * already ensured that the array is big enough in push_block():
3139 */
3140 if (ctx.type == TGSI_PROCESSOR_FRAGMENT) {
3141 for (i = 0; i < ctx.kill_count; i++)
3142 block->outputs[block->noutputs++] = ctx.kill[i];
3143 }
3144
3145 if (fd_mesa_debug & FD_DBG_OPTDUMP)
3146 compile_dump(&ctx);
3147
3148 ret = ir3_block_flatten(block);
3149 if (ret < 0) {
3150 DBG("FLATTEN failed!");
3151 goto out;
3152 }
3153 if ((ret > 0) && (fd_mesa_debug & FD_DBG_OPTDUMP))
3154 compile_dump(&ctx);
3155
3156 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3157 printf("BEFORE CP:\n");
3158 ir3_dump_instr_list(block->head);
3159 }
3160
3161 if (cp)
3162 ir3_block_cp(block);
3163
3164 if (fd_mesa_debug & FD_DBG_OPTDUMP)
3165 compile_dump(&ctx);
3166
3167 ir3_block_depth(block);
3168
3169 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3170 printf("AFTER DEPTH:\n");
3171 ir3_dump_instr_list(block->head);
3172 }
3173
3174 ret = ir3_block_sched(block);
3175 if (ret) {
3176 DBG("SCHED failed!");
3177 goto out;
3178 }
3179
3180 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3181 printf("AFTER SCHED:\n");
3182 ir3_dump_instr_list(block->head);
3183 }
3184
3185 ret = ir3_block_ra(block, so->type, key.half_precision,
3186 so->frag_coord, so->frag_face, &so->has_samp, &max_bary);
3187 if (ret) {
3188 DBG("RA failed!");
3189 goto out;
3190 }
3191
3192 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3193 printf("AFTER RA:\n");
3194 ir3_dump_instr_list(block->head);
3195 }
3196
3197 /* fixup input/outputs: */
3198 for (i = 0; i < so->outputs_count; i++) {
3199 so->outputs[i].regid = block->outputs[i*4]->regs[0]->num;
3200 /* preserve hack for depth output.. tgsi writes depth to .z,
3201 * but what we give the hw is the scalar register:
3202 */
3203 if ((ctx.type == TGSI_PROCESSOR_FRAGMENT) &&
3204 (sem2name(so->outputs[i].semantic) == TGSI_SEMANTIC_POSITION))
3205 so->outputs[i].regid += 2;
3206 }
3207 /* Note that some or all channels of an input may be unused: */
3208 actual_in = 0;
3209 for (i = 0; i < so->inputs_count; i++) {
3210 unsigned j, regid = ~0, compmask = 0;
3211 so->inputs[i].ncomp = 0;
3212 for (j = 0; j < 4; j++) {
3213 struct ir3_instruction *in = inputs[(i*4) + j];
3214 if (in) {
3215 compmask |= (1 << j);
3216 regid = in->regs[0]->num - j;
3217 actual_in++;
3218 so->inputs[i].ncomp++;
3219 }
3220 }
3221 so->inputs[i].regid = regid;
3222 so->inputs[i].compmask = compmask;
3223 }
3224
3225 /* fragment shader always gets full vec4's even if it doesn't
3226 * fetch all components, but vertex shader we need to update
3227 * with the actual number of components fetch, otherwise thing
3228 * will hang due to mismaptch between VFD_DECODE's and
3229 * TOTALATTRTOVS
3230 */
3231 if (so->type == SHADER_VERTEX)
3232 so->total_in = actual_in;
3233 else
3234 so->total_in = align(max_bary + 1, 4);
3235
3236 out:
3237 if (ret) {
3238 ir3_destroy(so->ir);
3239 so->ir = NULL;
3240 }
3241 compile_free(&ctx);
3242
3243 return ret;
3244 }