src/gallium/drivers/freedreno/ir3/ir3_compiler.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdarg.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_strings.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_scan.h"

#include "freedreno_util.h"

#include "ir3_compiler.h"
#include "ir3_shader.h"

#include "instr-a3xx.h"
#include "ir3.h"

struct ir3_compile_context {
	const struct tgsi_token *tokens;
	bool free_tokens;
	struct ir3 *ir;
	struct ir3_shader_variant *so;
	uint16_t integer_s;

	struct ir3_block *block;
	struct ir3_instruction *current_instr;

	/* we need to defer updates to block->outputs[] until the end
	 * of an instruction (so we don't see new value until *after*
	 * the src registers are processed)
	 */
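	/* (e.g. for "MOV TEMP[0].xy, TEMP[0].yx" the reads of .yx must
	 * still see the pre-MOV values while the .x/.y writes are pending)
	 */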
	struct {
		struct ir3_instruction *instr, **instrp;
	} output_updates[16];
	unsigned num_output_updates;

	/* are we in a sequence of "atomic" instructions?
	 */
	bool atomic;

	/* For fragment shaders, from the hw perspective the only
	 * actual input is r0.xy position register passed to bary.f.
	 * But TGSI doesn't know that, it still declares things as
	 * IN[] registers.  So we do all the input tracking normally
	 * and fix things up after compile_instructions()
	 *
	 * NOTE that frag_pos is the hardware position (possibly it
	 * is actually an index or tag or some such.. it is *not*
	 * values that can be directly used for gl_FragCoord..)
	 */
	struct ir3_instruction *frag_pos, *frag_face, *frag_coord[4];

	struct tgsi_parse_context parser;
	unsigned type;

	struct tgsi_shader_info info;

	/* hmm, would be nice if tgsi_scan_shader figured this out
	 * for us:
	 */
	struct {
		unsigned first, last;
		struct ir3_instruction *fanin;
	} array[16];
	uint32_t array_dirty;
	/* offset into array[], per file, of first array info */
	uint8_t array_offsets[TGSI_FILE_COUNT];

	/* for calculating input/output positions/linkages: */
	unsigned next_inloc;

	unsigned num_internal_temps;
	struct tgsi_src_register internal_temps[8];

	/* idx/slot for last compiler generated immediate */
	unsigned immediate_idx;

	/* stack of branch instructions that mark (potentially nested)
	 * branch if/else/loop/etc
	 */
	struct {
		struct ir3_instruction *instr, *cond;
		bool inv;   /* true iff in else leg of branch */
	} branch[16];
	unsigned int branch_count;

	/* list of kill instructions: */
	struct ir3_instruction *kill[16];
	unsigned int kill_count;

	/* used when dst is same as one of the src, to avoid overwriting a
	 * src element before the remaining scalar instructions that make
	 * up the vector operation
	 */
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;

	/* just for catching incorrect use of get_dst()/put_dst():
	 */
	bool using_tmp_dst;
};


static void vectorize(struct ir3_compile_context *ctx,
		struct ir3_instruction *instr, struct tgsi_dst_register *dst,
		int nsrcs, ...);
static void create_mov(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *dst, struct tgsi_src_register *src);
static type_t get_ftype(struct ir3_compile_context *ctx);

static unsigned
setup_arrays(struct ir3_compile_context *ctx, unsigned file, unsigned i)
{
	/* ArrayID 0 for a given file is the legacy array spanning the entire file: */
	ctx->array[i].first = 0;
	ctx->array[i].last = ctx->info.file_max[file];
	ctx->array_offsets[file] = i;
	i += ctx->info.array_max[file] + 1;
	return i;
}

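/* e.g. a shader declaring two TEMPORARY arrays reserves three entries
 * for that file: base+0 (the legacy whole-file array) plus base+1 and
 * base+2 for ArrayID 1 and 2, where base is what gets stored in
 * array_offsets[TGSI_FILE_TEMPORARY].
 */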
static unsigned
compile_init(struct ir3_compile_context *ctx, struct ir3_shader_variant *so,
		const struct tgsi_token *tokens)
{
	unsigned ret, i;
	struct tgsi_shader_info *info = &ctx->info;
	struct tgsi_lowering_config lconfig = {
			.color_two_side = so->key.color_two_side,
			.lower_DST = true,
			.lower_XPD = true,
			.lower_SCS = true,
			.lower_LRP = true,
			.lower_FRC = true,
			.lower_POW = true,
			.lower_LIT = true,
			.lower_EXP = true,
			.lower_LOG = true,
			.lower_DP4 = true,
			.lower_DP3 = true,
			.lower_DPH = true,
			.lower_DP2 = true,
			.lower_DP2A = true,
	};

	switch (so->type) {
	case SHADER_FRAGMENT:
	case SHADER_COMPUTE:
		lconfig.saturate_s = so->key.fsaturate_s;
		lconfig.saturate_t = so->key.fsaturate_t;
		lconfig.saturate_r = so->key.fsaturate_r;
		ctx->integer_s = so->key.finteger_s;
		break;
	case SHADER_VERTEX:
		lconfig.saturate_s = so->key.vsaturate_s;
		lconfig.saturate_t = so->key.vsaturate_t;
		lconfig.saturate_r = so->key.vsaturate_r;
		ctx->integer_s = so->key.vinteger_s;
		break;
	}

	if (!so->shader) {
		/* hack for standalone compiler which does not have
		 * screen/context:
		 */
	} else if (ir3_shader_gpuid(so->shader) >= 400) {
		/* a4xx seems to have *no* sam.p */
		lconfig.lower_TXP = ~0;  /* lower all txp */
	} else {
		/* a3xx just needs to avoid sam.p for 3d tex */
		lconfig.lower_TXP = (1 << TGSI_TEXTURE_3D);
	}

	ctx->tokens = tgsi_transform_lowering(&lconfig, tokens, &ctx->info);
	ctx->free_tokens = !!ctx->tokens;
	if (!ctx->tokens) {
		/* no lowering */
		ctx->tokens = tokens;
	}
	ctx->ir = so->ir;
	ctx->so = so;
	ctx->array_dirty = 0;
	ctx->next_inloc = 8;
	ctx->num_internal_temps = 0;
	ctx->branch_count = 0;
	ctx->kill_count = 0;
	ctx->block = NULL;
	ctx->current_instr = NULL;
	ctx->num_output_updates = 0;
	ctx->atomic = false;
	ctx->frag_pos = NULL;
	ctx->frag_face = NULL;
	ctx->tmp_src = NULL;
	ctx->using_tmp_dst = false;

	memset(ctx->frag_coord, 0, sizeof(ctx->frag_coord));
	memset(ctx->array, 0, sizeof(ctx->array));
	memset(ctx->array_offsets, 0, sizeof(ctx->array_offsets));

#define FM(x) (1 << TGSI_FILE_##x)
	/* optimize can't deal with relative addressing: */
	if (info->indirect_files_written & (FM(TEMPORARY) | FM(INPUT) | FM(OUTPUT)))
		return TGSI_PARSE_ERROR;

	/* NOTE: if relative addressing is used, we set constlen in
	 * the compiler (to worst-case value) since we don't know in
	 * the assembler what the max addr reg value can be:
	 */
	if (info->indirect_files & FM(CONSTANT))
		so->constlen = 4 * (ctx->info.file_max[TGSI_FILE_CONSTANT] + 1);

	i = 0;
	i = setup_arrays(ctx, TGSI_FILE_INPUT, i);
	i = setup_arrays(ctx, TGSI_FILE_TEMPORARY, i);
	i = setup_arrays(ctx, TGSI_FILE_OUTPUT, i);
	/* any others? we don't track arrays for const..*/

	/* Immediates go after constants: */
	so->first_immediate = info->file_max[TGSI_FILE_CONSTANT] + 1;
	ctx->immediate_idx = 4 * (ctx->info.file_max[TGSI_FILE_IMMEDIATE] + 1);

	ret = tgsi_parse_init(&ctx->parser, ctx->tokens);
	if (ret != TGSI_PARSE_OK)
		return ret;

	ctx->type = ctx->parser.FullHeader.Processor.Processor;

	return ret;
}

static void
compile_error(struct ir3_compile_context *ctx, const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	_debug_vprintf(format, ap);
	va_end(ap);
	tgsi_dump(ctx->tokens, 0);
	debug_assert(0);
}

#define compile_assert(ctx, cond) do { \
		if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
	} while (0)

static void
compile_free(struct ir3_compile_context *ctx)
{
	if (ctx->free_tokens)
		free((void *)ctx->tokens);
	tgsi_parse_free(&ctx->parser);
}

struct instr_translater {
	void (*fxn)(const struct instr_translater *t,
			struct ir3_compile_context *ctx,
			struct tgsi_full_instruction *inst);
	unsigned tgsi_opc;
	opc_t opc;
	opc_t hopc;    /* opc to use for half_precision mode, if different */
	unsigned arg;
};

static void
instr_finish(struct ir3_compile_context *ctx)
{
	unsigned i;

	if (ctx->atomic)
		return;

	for (i = 0; i < ctx->num_output_updates; i++)
		*(ctx->output_updates[i].instrp) = ctx->output_updates[i].instr;

	ctx->num_output_updates = 0;

	while (ctx->array_dirty) {
		unsigned aid = ffs(ctx->array_dirty) - 1;
		ctx->array[aid].fanin = NULL;
		ctx->array_dirty &= ~(1 << aid);
	}
}

/* For "atomic" groups of instructions, for example the four scalar
 * instructions to perform a vec4 operation.  Basically this just
 * blocks out handling of output_updates so the next scalar instruction
 * still sees the result from before the start of the atomic group.
 *
 * NOTE: when used properly, this could probably replace get/put_dst()
 * stuff.
 */
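/* (e.g. vectorize() brackets the per-channel scalar instructions of a
 * vec4 op with instr_atomic_start()/instr_atomic_end(), so a src that
 * aliases the dst still reads its pre-op value for every channel)
 */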
static void
instr_atomic_start(struct ir3_compile_context *ctx)
{
	ctx->atomic = true;
}

static void
instr_atomic_end(struct ir3_compile_context *ctx)
{
	ctx->atomic = false;
	instr_finish(ctx);
}

static struct ir3_instruction *
instr_create(struct ir3_compile_context *ctx, int category, opc_t opc)
{
	instr_finish(ctx);
	return (ctx->current_instr = ir3_instr_create(ctx->block, category, opc));
}

static struct ir3_block *
push_block(struct ir3_compile_context *ctx)
{
	struct ir3_block *block;
	unsigned ntmp, nin, nout;

#define SCALAR_REGS(file) (4 * (ctx->info.file_max[TGSI_FILE_ ## file] + 1))

	/* hmm, give ourselves room to create 8 extra temporaries (vec4):
	 */
	ntmp = SCALAR_REGS(TEMPORARY);
	ntmp += 8 * 4;

	nout = SCALAR_REGS(OUTPUT);
	nin = SCALAR_REGS(INPUT);

	/* for outermost block, 'inputs' are the actual shader INPUT
	 * register file.  Reads from INPUT registers always go back to
	 * top block.  For nested blocks, 'inputs' is used to track any
	 * TEMPORARY file register from one of the enclosing blocks that
	 * is read in this block.
	 */
	if (!ctx->block) {
		/* NOTE: fragment shaders actually have two inputs (r0.xy, the
		 * position)
		 */
		if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
			int n = 2;
			if (ctx->info.reads_position)
				n += 4;
			if (ctx->info.uses_frontface)
				n += 4;
			nin = MAX2(n, nin);
			nout += ARRAY_SIZE(ctx->kill);
		}
	} else {
		nin = ntmp;
	}

	block = ir3_block_create(ctx->ir, ntmp, nin, nout);

	if ((ctx->type == TGSI_PROCESSOR_FRAGMENT) && !ctx->block)
		block->noutputs -= ARRAY_SIZE(ctx->kill);

	block->parent = ctx->block;
	ctx->block = block;

	return block;
}

static void
pop_block(struct ir3_compile_context *ctx)
{
	ctx->block = ctx->block->parent;
	compile_assert(ctx, ctx->block);
}

static struct ir3_instruction *
create_output(struct ir3_block *block, struct ir3_instruction *instr,
		unsigned n)
{
	struct ir3_instruction *out;

	out = ir3_instr_create(block, -1, OPC_META_OUTPUT);
	out->inout.block = block;
	ir3_reg_create(out, n, 0);
	if (instr)
		ir3_reg_create(out, 0, IR3_REG_SSA)->instr = instr;

	return out;
}

static struct ir3_instruction *
create_input(struct ir3_block *block, struct ir3_instruction *instr,
		unsigned n)
{
	struct ir3_instruction *in;

	in = ir3_instr_create(block, -1, OPC_META_INPUT);
	in->inout.block = block;
	ir3_reg_create(in, n, 0);
	if (instr)
		ir3_reg_create(in, 0, IR3_REG_SSA)->instr = instr;

	return in;
}

static struct ir3_instruction *
block_input(struct ir3_block *block, unsigned n)
{
	/* references to INPUT register file always go back up to
	 * top level:
	 */
	if (block->parent)
		return block_input(block->parent, n);
	return block->inputs[n];
}

/* return temporary in scope, creating a meta-input node if needed
 * to track block inputs
 */
static struct ir3_instruction *
block_temporary(struct ir3_block *block, unsigned n)
{
	/* references to TEMPORARY register file, find the nearest
	 * enclosing block which has already assigned this temporary,
	 * creating meta-input instructions along the way to keep
	 * track of block inputs
	 */
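	/* (e.g. a temporary written in the top-level block and read inside
	 * a nested IF block gets a meta-input in the nested block, and in
	 * any intermediate blocks, pointing back at the defining instr)
	 */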
	if (block->parent && !block->temporaries[n]) {
		/* if already have input for this block, reuse: */
		if (!block->inputs[n])
			block->inputs[n] = block_temporary(block->parent, n);

		/* and create new input to return: */
		return create_input(block, block->inputs[n], n);
	}
	return block->temporaries[n];
}

static struct ir3_instruction *
create_immed(struct ir3_compile_context *ctx, float val)
{
	/* NOTE: *don't* use instr_create() here!
	 */
	struct ir3_instruction *instr;
	instr = ir3_instr_create(ctx->block, 1, 0);
	instr->cat1.src_type = get_ftype(ctx);
	instr->cat1.dst_type = get_ftype(ctx);
	ir3_reg_create(instr, 0, 0);
	ir3_reg_create(instr, 0, IR3_REG_IMMED)->fim_val = val;
	return instr;
}

static void
ssa_dst(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_dst_register *dst, unsigned chan)
{
	unsigned n = regid(dst->Index, chan);
	unsigned idx = ctx->num_output_updates;

	compile_assert(ctx, idx < ARRAY_SIZE(ctx->output_updates));

	/* NOTE: defer update of temporaries[idx] or output[idx]
	 * until instr_finish(), so that if the current instruction
	 * reads the same TEMP/OUT[] it gets the old value:
	 *
	 * bleh.. this might be a bit easier to just figure out
	 * in instr_finish().  But at that point we've already
	 * lost information about OUTPUT vs TEMPORARY register
	 * file..
	 */

	switch (dst->File) {
	case TGSI_FILE_OUTPUT:
		compile_assert(ctx, n < ctx->block->noutputs);
		ctx->output_updates[idx].instrp = &ctx->block->outputs[n];
		ctx->output_updates[idx].instr = instr;
		ctx->num_output_updates++;
		break;
	case TGSI_FILE_TEMPORARY:
		compile_assert(ctx, n < ctx->block->ntemporaries);
		ctx->output_updates[idx].instrp = &ctx->block->temporaries[n];
		ctx->output_updates[idx].instr = instr;
		ctx->num_output_updates++;
		break;
	case TGSI_FILE_ADDRESS:
		compile_assert(ctx, n < 1);
		ctx->output_updates[idx].instrp = &ctx->block->address;
		ctx->output_updates[idx].instr = instr;
		ctx->num_output_updates++;
		break;
	}
}

static struct ir3_instruction *
ssa_instr(struct ir3_compile_context *ctx, unsigned file, unsigned n)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *instr = NULL;

	switch (file) {
	case TGSI_FILE_INPUT:
		instr = block_input(ctx->block, n);
		break;
	case TGSI_FILE_OUTPUT:
		/* really this should just happen in case of 'MOV_SAT OUT[n], ..',
		 * for the following clamp instructions:
		 */
		instr = block->outputs[n];
		/* we don't have to worry about read from an OUTPUT that was
		 * assigned outside of the current block, because the _SAT
		 * clamp instructions will always be in the same block as
		 * the original instruction which wrote the OUTPUT
		 */
		compile_assert(ctx, instr);
		break;
	case TGSI_FILE_TEMPORARY:
		instr = block_temporary(ctx->block, n);
		if (!instr) {
			/* this can happen when registers (or components of a TGSI
			 * register) are used as src before they have been assigned
			 * (undefined contents).  To avoid confusing the rest of the
			 * compiler, and to generally keep things peachy, substitute
			 * an instruction that sets the src to 0.0.  Or to keep
			 * things undefined, I could plug in a random number? :-P
			 *
			 * NOTE: *don't* use instr_create() here!
			 */
			instr = create_immed(ctx, 0.0);
			/* no need to recreate the immed for every access: */
			block->temporaries[n] = instr;
		}
		break;
	}

	return instr;
}

static int array_id(struct ir3_compile_context *ctx,
		const struct tgsi_src_register *src)
{
	// XXX complete hack to recover tgsi_full_src_register...
	// nothing that isn't wrapped in a tgsi_full_src_register
	// should be indirect
	const struct tgsi_full_src_register *fsrc = (const void *)src;
	debug_assert(src->File != TGSI_FILE_CONSTANT);
	return fsrc->Indirect.ArrayID + ctx->array_offsets[src->File];
}

static void
ssa_src(struct ir3_compile_context *ctx, struct ir3_register *reg,
		const struct tgsi_src_register *src, unsigned chan)
{
	struct ir3_instruction *instr;

	if (src->Indirect && (src->File != TGSI_FILE_CONSTANT)) {
		/* for relative addressing of gpr's (due to register assignment)
		 * we must generate a fanin instruction to collect all possible
		 * array elements that the instruction could address together:
		 */
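		/* (e.g. "MOV dst, TEMP[A0.x+1]" over a TEMP array spanning
		 * TEMP[0]..TEMP[3] collects all 16 scalar components into a
		 * single fanin, so RA keeps them contiguous for the relative
		 * access)
		 */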
		unsigned i, j, aid = array_id(ctx, src);

		if (ctx->array[aid].fanin) {
			instr = ctx->array[aid].fanin;
		} else {
			unsigned first, last;

			first = ctx->array[aid].first;
			last = ctx->array[aid].last;

			instr = ir3_instr_create2(ctx->block, -1, OPC_META_FI,
					1 + (4 * (last + 1 - first)));
			ir3_reg_create(instr, 0, 0);
			for (i = first; i <= last; i++) {
				for (j = 0; j < 4; j++) {
					unsigned n = (i * 4) + j;
					ir3_reg_create(instr, 0, IR3_REG_SSA)->instr =
							ssa_instr(ctx, src->File, n);
				}
			}
			ctx->array[aid].fanin = instr;
			ctx->array_dirty |= (1 << aid);
		}
	} else {
		/* normal case (not relative addressed GPR) */
		instr = ssa_instr(ctx, src->File, regid(src->Index, chan));
	}

	if (instr) {
		reg->flags |= IR3_REG_SSA;
		reg->instr = instr;
	} else if (reg->flags & IR3_REG_SSA) {
		/* special hack for trans_samp() which calls ssa_src() directly
		 * to build up the collect (fanin) for const src.. (so SSA flag
		 * set but no src instr... it basically gets lucky because we
		 * default to 0.0 for "undefined" src instructions, which is
		 * what it wants.  We probably need to give it a better way to
		 * do this, but for now this hack:
		 */
		reg->instr = create_immed(ctx, 0.0);
	}
}

static struct ir3_register *
add_dst_reg_wrmask(struct ir3_compile_context *ctx,
		struct ir3_instruction *instr, const struct tgsi_dst_register *dst,
		unsigned chan, unsigned wrmask)
{
	unsigned flags = 0, num = 0;
	struct ir3_register *reg;

	switch (dst->File) {
	case TGSI_FILE_OUTPUT:
	case TGSI_FILE_TEMPORARY:
		/* uses SSA */
		break;
	case TGSI_FILE_ADDRESS:
		flags |= IR3_REG_ADDR;
		/* uses SSA */
		break;
	default:
		compile_error(ctx, "unsupported dst register file: %s\n",
				tgsi_file_name(dst->File));
		break;
	}

	if (dst->Indirect)
		flags |= IR3_REG_RELATIV;

	reg = ir3_reg_create(instr, regid(num, chan), flags);

	reg->wrmask = wrmask;
	if (wrmask == 0x1) {
		/* normal case */
		ssa_dst(ctx, instr, dst, chan);
	} else if ((dst->File == TGSI_FILE_TEMPORARY) ||
			(dst->File == TGSI_FILE_OUTPUT) ||
			(dst->File == TGSI_FILE_ADDRESS)) {
		struct ir3_instruction *prev = NULL;
		unsigned i;

		/* if instruction writes multiple, we need to create
		 * some place-holder to collect the registers:
		 */
		for (i = 0; i < 4; i++) {
			/* NOTE: slightly ugly that we setup neighbor ptrs
			 * for FO here, but handle FI in CP pass.. we should
			 * probably just always setup neighbor ptrs in the
			 * frontend?
			 */
			struct ir3_instruction *split =
					ir3_instr_create(ctx->block, -1, OPC_META_FO);
			split->fo.off = i;
			/* unused dst reg: */
			/* NOTE: set SSA flag on dst here, because unused FO's
			 * which don't get scheduled will end up not in the
			 * instruction list when RA sets SSA flag on each dst.
			 * Slight hack.  We really should set SSA flag on
			 * every dst register in the frontend.
			 */
			ir3_reg_create(split, 0, IR3_REG_SSA);
			/* and src reg used to hold original instr */
			ir3_reg_create(split, 0, IR3_REG_SSA)->instr = instr;
			if (prev) {
				split->cp.left = prev;
				split->cp.left_cnt++;
				prev->cp.right = split;
				prev->cp.right_cnt++;
			}
			if ((wrmask & (1 << i)) && !ctx->atomic)
				ssa_dst(ctx, split, dst, chan+i);
			prev = split;
		}
	}

	return reg;
}

static struct ir3_register *
add_dst_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_dst_register *dst, unsigned chan)
{
	return add_dst_reg_wrmask(ctx, instr, dst, chan, 0x1);
}

static struct ir3_register *
add_src_reg_wrmask(struct ir3_compile_context *ctx,
		struct ir3_instruction *instr, const struct tgsi_src_register *src,
		unsigned chan, unsigned wrmask)
{
	unsigned flags = 0, num = 0;
	struct ir3_register *reg;
	struct ir3_instruction *orig = NULL;

	switch (src->File) {
	case TGSI_FILE_IMMEDIATE:
		/* TODO if possible, use actual immediate instead of const.. but
		 * TGSI has vec4 immediates, we can only embed scalar (of limited
		 * size, depending on instruction..)
		 */
		flags |= IR3_REG_CONST;
		num = src->Index + ctx->so->first_immediate;
		break;
	case TGSI_FILE_CONSTANT:
		flags |= IR3_REG_CONST;
		num = src->Index;
		break;
	case TGSI_FILE_OUTPUT:
		/* NOTE: we should only end up w/ OUTPUT file for things like
		 * clamp()'ing saturated dst instructions
		 */
	case TGSI_FILE_INPUT:
	case TGSI_FILE_TEMPORARY:
		/* uses SSA */
		break;
	default:
		compile_error(ctx, "unsupported src register file: %s\n",
				tgsi_file_name(src->File));
		break;
	}

	/* We seem to have 8 bits (6.2) for dst register always, so I think
	 * it is safe to assume GPR cannot be >=64
	 *
	 * cat3 instructions only have 8 bits for src2, but cannot take a
	 * const for src2
	 *
	 * cat5 and cat6 in some cases only has 8 bits, but cannot take a
	 * const for any src.
	 *
	 * Other than that we seem to have 12 bits to encode const src,
	 * except for cat1 which may only have 11 bits (but that seems like
	 * a bug)
	 */
	if (flags & IR3_REG_CONST)
		compile_assert(ctx, src->Index < (1 << 9));
	else
		compile_assert(ctx, src->Index < (1 << 6));

	if (src->Absolute)
		flags |= IR3_REG_ABS;
	if (src->Negate)
		flags |= IR3_REG_NEGATE;

	if (src->Indirect) {
		flags |= IR3_REG_RELATIV;

		/* shouldn't happen, and we can't cope with it below: */
		compile_assert(ctx, wrmask == 0x1);

		/* wrap in a meta-deref to track both the src and address: */
		orig = instr;

		instr = ir3_instr_create(ctx->block, -1, OPC_META_DEREF);
		ir3_reg_create(instr, 0, 0);
		ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->block->address;

		if (src->File != TGSI_FILE_CONSTANT) {
			unsigned aid = array_id(ctx, src);
			unsigned off = src->Index - ctx->array[aid].first; /* vec4 offset */
			instr->deref.off = regid(off, chan);
		}
	}

	reg = ir3_reg_create(instr, regid(num, chan), flags);

	if (src->Indirect && (src->File != TGSI_FILE_CONSTANT)) {
		unsigned aid = array_id(ctx, src);
		reg->size = 4 * (1 + ctx->array[aid].last - ctx->array[aid].first);
	} else {
		reg->wrmask = wrmask;
	}

	if (wrmask == 0x1) {
		/* normal case */
		ssa_src(ctx, reg, src, chan);
	} else if ((src->File == TGSI_FILE_TEMPORARY) ||
			(src->File == TGSI_FILE_OUTPUT) ||
			(src->File == TGSI_FILE_INPUT)) {
		struct ir3_instruction *collect;
		unsigned i;

		compile_assert(ctx, !src->Indirect);

		/* if instruction reads multiple, we need to create
		 * some place-holder to collect the registers:
		 */
		collect = ir3_instr_create(ctx->block, -1, OPC_META_FI);
		ir3_reg_create(collect, 0, 0);  /* unused dst reg */

		for (i = 0; i < 4; i++) {
			if (wrmask & (1 << i)) {
				/* and src reg used to point to the original instr */
				ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
						src, chan + i);
			} else if (wrmask & ~((1 << i) - 1)) {
				/* if any remaining components, then dummy
				 * placeholder src reg to fill in the blanks:
				 */
				ir3_reg_create(collect, 0, 0);
			}
		}

		reg->flags |= IR3_REG_SSA;
		reg->instr = collect;
	}

	if (src->Indirect) {
		unsigned size = reg->size;

		reg = ir3_reg_create(orig, 0, flags | IR3_REG_SSA);
		reg->instr = instr;
		reg->size = size;
	}
	return reg;
}

static struct ir3_register *
add_src_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_src_register *src, unsigned chan)
{
	return add_src_reg_wrmask(ctx, instr, src, chan, 0x1);
}

static void
src_from_dst(struct tgsi_src_register *src, struct tgsi_dst_register *dst)
{
	src->File = dst->File;
	src->Indirect = dst->Indirect;
	src->Dimension = dst->Dimension;
	src->Index = dst->Index;
	src->Absolute = 0;
	src->Negate = 0;
	src->SwizzleX = TGSI_SWIZZLE_X;
	src->SwizzleY = TGSI_SWIZZLE_Y;
	src->SwizzleZ = TGSI_SWIZZLE_Z;
	src->SwizzleW = TGSI_SWIZZLE_W;
}

/* Get internal-temp src/dst to use for a sequence of instructions
 * generated by a single TGSI op.
 */
static struct tgsi_src_register *
get_internal_temp(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *tmp_dst)
{
	struct tgsi_src_register *tmp_src;
	int n;

	tmp_dst->File = TGSI_FILE_TEMPORARY;
	tmp_dst->WriteMask = TGSI_WRITEMASK_XYZW;
	tmp_dst->Indirect = 0;
	tmp_dst->Dimension = 0;

	/* assign next temporary: */
	n = ctx->num_internal_temps++;
	compile_assert(ctx, n < ARRAY_SIZE(ctx->internal_temps));
	tmp_src = &ctx->internal_temps[n];

	tmp_dst->Index = ctx->info.file_max[TGSI_FILE_TEMPORARY] + n + 1;

	src_from_dst(tmp_src, tmp_dst);

	return tmp_src;
}
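/* (e.g. trans_cmp() grabs an internal temp to hold the cmps.f result
 * before the cov.u16f16 / absneg.s that writes the real dst)
 */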

static inline bool
is_const(struct tgsi_src_register *src)
{
	return (src->File == TGSI_FILE_CONSTANT) ||
			(src->File == TGSI_FILE_IMMEDIATE);
}

static inline bool
is_relative(struct tgsi_src_register *src)
{
	return src->Indirect;
}

static inline bool
is_rel_or_const(struct tgsi_src_register *src)
{
	return is_relative(src) || is_const(src);
}

static type_t
get_ftype(struct ir3_compile_context *ctx)
{
	return TYPE_F32;
}

static type_t
get_utype(struct ir3_compile_context *ctx)
{
	return TYPE_U32;
}

static type_t
get_stype(struct ir3_compile_context *ctx)
{
	return TYPE_S32;
}

static unsigned
src_swiz(struct tgsi_src_register *src, int chan)
{
	switch (chan) {
	case 0: return src->SwizzleX;
	case 1: return src->SwizzleY;
	case 2: return src->SwizzleZ;
	case 3: return src->SwizzleW;
	}
	assert(0);
	return 0;
}

/* for instructions that cannot take a const register as src, if needed
 * generate a move to temporary gpr:
 */
static struct tgsi_src_register *
get_unconst(struct ir3_compile_context *ctx, struct tgsi_src_register *src)
{
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;

	compile_assert(ctx, is_rel_or_const(src));

	tmp_src = get_internal_temp(ctx, &tmp_dst);

	create_mov(ctx, &tmp_dst, src);

	return tmp_src;
}
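/* (e.g. cmps.f cannot take two const/immediate srcs, so trans_cmp()
 * runs a0 through get_unconst() when both a0 and a1 are const)
 */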

static void
get_immediate(struct ir3_compile_context *ctx,
		struct tgsi_src_register *reg, uint32_t val)
{
	unsigned neg, swiz, idx, i;
	/* actually maps 1:1 currently.. not sure if that is safe to rely on: */
	static const unsigned swiz2tgsi[] = {
			TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
	};

	for (i = 0; i < ctx->immediate_idx; i++) {
		swiz = i % 4;
		idx = i / 4;

		if (ctx->so->immediates[idx].val[swiz] == val) {
			neg = 0;
			break;
		}

		if (ctx->so->immediates[idx].val[swiz] == -val) {
			neg = 1;
			break;
		}
	}

	if (i == ctx->immediate_idx) {
		/* need to generate a new immediate: */
		swiz = i % 4;
		idx = i / 4;
		neg = 0;
		ctx->so->immediates[idx].val[swiz] = val;
		ctx->so->immediates_count = idx + 1;
		ctx->immediate_idx++;
	}

	reg->File = TGSI_FILE_IMMEDIATE;
	reg->Indirect = 0;
	reg->Dimension = 0;
	reg->Index = idx;
	reg->Absolute = 0;
	reg->Negate = neg;
	reg->SwizzleX = swiz2tgsi[swiz];
	reg->SwizzleY = swiz2tgsi[swiz];
	reg->SwizzleZ = swiz2tgsi[swiz];
	reg->SwizzleW = swiz2tgsi[swiz];
}
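/* (immediates are packed four per vec4 const slot, e.g. the first two
 * distinct values requested land in IMM[0].x and IMM[0].y, and the
 * broadcast swizzle makes every channel of the src read that scalar)
 */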

static void
create_mov(struct ir3_compile_context *ctx, struct tgsi_dst_register *dst,
		struct tgsi_src_register *src)
{
	type_t type_mov = get_ftype(ctx);
	unsigned i;

	for (i = 0; i < 4; i++) {
		/* move to destination: */
		if (dst->WriteMask & (1 << i)) {
			struct ir3_instruction *instr;

			if (src->Absolute || src->Negate) {
				/* can't have abs or neg on a mov instr, so use
				 * absneg.f instead to handle these cases:
				 */
				instr = instr_create(ctx, 2, OPC_ABSNEG_F);
			} else {
				instr = instr_create(ctx, 1, 0);
				instr->cat1.src_type = type_mov;
				instr->cat1.dst_type = type_mov;
			}

			add_dst_reg(ctx, instr, dst, i);
			add_src_reg(ctx, instr, src, src_swiz(src, i));
		}
	}
}

static void
create_clamp(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *dst, struct tgsi_src_register *val,
		struct tgsi_src_register *minval, struct tgsi_src_register *maxval)
{
	struct ir3_instruction *instr;

	instr = instr_create(ctx, 2, OPC_MAX_F);
	vectorize(ctx, instr, dst, 2, val, 0, minval, 0);

	instr = instr_create(ctx, 2, OPC_MIN_F);
	vectorize(ctx, instr, dst, 2, val, 0, maxval, 0);
}

static void
create_clamp_imm(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *dst,
		uint32_t minval, uint32_t maxval)
{
	struct tgsi_src_register minconst, maxconst;
	struct tgsi_src_register src;

	src_from_dst(&src, dst);

	get_immediate(ctx, &minconst, minval);
	get_immediate(ctx, &maxconst, maxval);

	create_clamp(ctx, dst, &src, &minconst, &maxconst);
}

static struct tgsi_dst_register *
get_dst(struct ir3_compile_context *ctx, struct tgsi_full_instruction *inst)
{
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	unsigned i;

	compile_assert(ctx, !ctx->using_tmp_dst);
	ctx->using_tmp_dst = true;

	for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
		struct tgsi_src_register *src = &inst->Src[i].Register;
		if ((src->File == dst->File) && (src->Index == dst->Index)) {
			if ((dst->WriteMask == TGSI_WRITEMASK_XYZW) &&
					(src->SwizzleX == TGSI_SWIZZLE_X) &&
					(src->SwizzleY == TGSI_SWIZZLE_Y) &&
					(src->SwizzleZ == TGSI_SWIZZLE_Z) &&
					(src->SwizzleW == TGSI_SWIZZLE_W))
				continue;
			ctx->tmp_src = get_internal_temp(ctx, &ctx->tmp_dst);
			ctx->tmp_dst.WriteMask = dst->WriteMask;
			dst = &ctx->tmp_dst;
			break;
		}
	}
	return dst;
}

static void
put_dst(struct ir3_compile_context *ctx, struct tgsi_full_instruction *inst,
		struct tgsi_dst_register *dst)
{
	compile_assert(ctx, ctx->using_tmp_dst);
	ctx->using_tmp_dst = false;

	/* if necessary, add mov back into original dst: */
	if (dst != &inst->Dst[0].Register) {
		create_mov(ctx, &inst->Dst[0].Register, ctx->tmp_src);
	}
}

/* helper to generate the necessary repeat and/or additional instructions
 * to turn a scalar instruction into a vector operation:
 */
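/* (e.g. "ADD dst.xyz, a, b" becomes three scalar add.f instructions, one
 * per enabled writemask channel, emitted as one atomic group so a src
 * that aliases dst still reads its pre-ADD value for every channel)
 */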
static void
vectorize(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		struct tgsi_dst_register *dst, int nsrcs, ...)
{
	va_list ap;
	int i, j, n = 0;

	instr_atomic_start(ctx);

	for (i = 0; i < 4; i++) {
		if (dst->WriteMask & (1 << i)) {
			struct ir3_instruction *cur;

			if (n++ == 0) {
				cur = instr;
			} else {
				cur = instr_create(ctx, instr->category, instr->opc);
				memcpy(cur->info, instr->info, sizeof(cur->info));
			}

			add_dst_reg(ctx, cur, dst, i);

			va_start(ap, nsrcs);
			for (j = 0; j < nsrcs; j++) {
				struct tgsi_src_register *src =
						va_arg(ap, struct tgsi_src_register *);
				unsigned flags = va_arg(ap, unsigned);
				struct ir3_register *reg;
				if (flags & IR3_REG_IMMED) {
					reg = ir3_reg_create(cur, 0, IR3_REG_IMMED);
					/* this is an ugly cast.. should have put flags first! */
					reg->iim_val = *(int *)&src;
				} else {
					reg = add_src_reg(ctx, cur, src, src_swiz(src, i));
				}
				reg->flags |= flags & ~IR3_REG_NEGATE;
				if (flags & IR3_REG_NEGATE)
					reg->flags ^= IR3_REG_NEGATE;
			}
			va_end(ap);
		}
	}

	instr_atomic_end(ctx);
}

/*
 * Handlers for TGSI instructions which do not have a 1:1 mapping to
 * native instructions:
 */

static void
trans_clamp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *src0 = &inst->Src[0].Register;
	struct tgsi_src_register *src1 = &inst->Src[1].Register;
	struct tgsi_src_register *src2 = &inst->Src[2].Register;

	create_clamp(ctx, dst, src0, src1, src2);

	put_dst(ctx, inst, dst);
}

/* ARL(x) = x, but mova from hrN.x to a0.. */
static void
trans_arl(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *src = &inst->Src[0].Register;
	unsigned chan = src->SwizzleX;

	compile_assert(ctx, dst->File == TGSI_FILE_ADDRESS);

	/* NOTE: we allocate a temporary from a flat register
	 * namespace (ignoring half vs full).  It turns out
	 * not to really matter since registers get reassigned
	 * later in ir3_ra which (hopefully!) can deal a bit
	 * better with mixed half and full precision.
	 */
	tmp_src = get_internal_temp(ctx, &tmp_dst);

	/* cov.{u,f}{32,16}s16 Rtmp, Rsrc */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = (t->tgsi_opc == TGSI_OPCODE_ARL) ?
			get_ftype(ctx) : get_utype(ctx);
	instr->cat1.dst_type = TYPE_S16;
	add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, src, chan);

	/* shl.b Rtmp, Rtmp, 2 */
	instr = instr_create(ctx, 2, OPC_SHL_B);
	add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;
	ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 2;

	/* mova a0, Rtmp */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = TYPE_S16;
	instr->cat1.dst_type = TYPE_S16;
	add_dst_reg(ctx, instr, dst, 0)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;
}

/*
 * texture fetch/sample instructions:
 */

struct tex_info {
	int8_t order[4];
	int8_t args;
	unsigned src_wrmask, flags;
};

struct target_info {
	uint8_t dims;
	uint8_t cube;
	uint8_t array;
	uint8_t shadow;
};

static const struct target_info tex_targets[] = {
	[TGSI_TEXTURE_1D]               = { 1, 0, 0, 0 },
	[TGSI_TEXTURE_2D]               = { 2, 0, 0, 0 },
	[TGSI_TEXTURE_3D]               = { 3, 0, 0, 0 },
	[TGSI_TEXTURE_CUBE]             = { 3, 1, 0, 0 },
	[TGSI_TEXTURE_RECT]             = { 2, 0, 0, 0 },
	[TGSI_TEXTURE_SHADOW1D]         = { 1, 0, 0, 1 },
	[TGSI_TEXTURE_SHADOW2D]         = { 2, 0, 0, 1 },
	[TGSI_TEXTURE_SHADOWRECT]       = { 2, 0, 0, 1 },
	[TGSI_TEXTURE_1D_ARRAY]         = { 1, 0, 1, 0 },
	[TGSI_TEXTURE_2D_ARRAY]         = { 2, 0, 1, 0 },
	[TGSI_TEXTURE_SHADOW1D_ARRAY]   = { 1, 0, 1, 1 },
	[TGSI_TEXTURE_SHADOW2D_ARRAY]   = { 2, 0, 1, 1 },
	[TGSI_TEXTURE_SHADOWCUBE]       = { 3, 1, 0, 1 },
	[TGSI_TEXTURE_2D_MSAA]          = { 2, 0, 0, 0 },
	[TGSI_TEXTURE_2D_ARRAY_MSAA]    = { 2, 0, 1, 0 },
	[TGSI_TEXTURE_CUBE_ARRAY]       = { 3, 1, 1, 0 },
	[TGSI_TEXTURE_SHADOWCUBE_ARRAY] = { 3, 1, 1, 1 },
};

static void
fill_tex_info(struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst,
		struct tex_info *info)
{
	const struct target_info *tgt = &tex_targets[inst->Texture.Texture];

	if (tgt->dims == 3)
		info->flags |= IR3_INSTR_3D;
	if (tgt->array)
		info->flags |= IR3_INSTR_A;
	if (tgt->shadow)
		info->flags |= IR3_INSTR_S;

	switch (inst->Instruction.Opcode) {
	case TGSI_OPCODE_TXB:
	case TGSI_OPCODE_TXB2:
	case TGSI_OPCODE_TXL:
	case TGSI_OPCODE_TXF:
		info->args = 2;
		break;
	case TGSI_OPCODE_TXP:
		info->flags |= IR3_INSTR_P;
		/* fallthrough */
	case TGSI_OPCODE_TEX:
	case TGSI_OPCODE_TXD:
		info->args = 1;
		break;
	}

	/*
	 * lay out the first argument in the proper order:
	 *  - actual coordinates first
	 *  - array index
	 *  - shadow reference
	 *  - projection w
	 *
	 * bias/lod go into the second arg
	 */
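	/* (e.g. TGSI_TEXTURE_SHADOW2D gives order = {0, 1, 2, -1}: the
	 * (x, y) coord followed by the comparison ref from .z, with
	 * src_wrmask ending up 0x7)
	 */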
	int arg, pos = 0;
	for (arg = 0; arg < tgt->dims; arg++)
		info->order[arg] = pos++;
	if (tgt->dims == 1)
		info->order[pos++] = -1;
	if (tgt->shadow)
		info->order[pos++] = MAX2(arg + tgt->array, 2);
	if (tgt->array)
		info->order[pos++] = arg++;
	if (info->flags & IR3_INSTR_P)
		info->order[pos++] = 3;

	info->src_wrmask = (1 << pos) - 1;

	for (; pos < 4; pos++)
		info->order[pos] = -1;

	assert(pos <= 4);
}

static bool check_swiz(struct tgsi_src_register *src, const int8_t order[4])
{
	unsigned i;
	for (i = 1; (i < 4) && order[i] >= 0; i++)
		if (src_swiz(src, i) != (src_swiz(src, 0) + order[i]))
			return false;
	return true;
}

static bool is_1d(unsigned tex)
{
	return tex_targets[tex].dims == 1;
}

static struct tgsi_src_register *
get_tex_coord(struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst,
		const struct tex_info *tinf)
{
	struct tgsi_src_register *coord = &inst->Src[0].Register;
	struct ir3_instruction *instr;
	unsigned tex = inst->Texture.Texture;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	type_t type_mov = get_ftype(ctx);
	unsigned j;

	/* need to move things around: */
	tmp_src = get_internal_temp(ctx, &tmp_dst);

	for (j = 0; j < 4; j++) {
		if (tinf->order[j] < 0)
			continue;
		instr = instr_create(ctx, 1, 0);  /* mov */
		instr->cat1.src_type = type_mov;
		instr->cat1.dst_type = type_mov;
		add_dst_reg(ctx, instr, &tmp_dst, j);
		add_src_reg(ctx, instr, coord,
				src_swiz(coord, tinf->order[j]));
	}

	/* fix up .y coord: */
	if (is_1d(tex)) {
		struct ir3_register *imm;
		instr = instr_create(ctx, 1, 0);  /* mov */
		instr->cat1.src_type = type_mov;
		instr->cat1.dst_type = type_mov;
		add_dst_reg(ctx, instr, &tmp_dst, 1);  /* .y */
		imm = ir3_reg_create(instr, 0, IR3_REG_IMMED);
		if (inst->Instruction.Opcode == TGSI_OPCODE_TXF)
			imm->iim_val = 0;
		else
			imm->fim_val = 0.5;
	}

	return tmp_src;
}

static void
trans_samp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr, *collect;
	struct ir3_register *reg;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *orig, *coord, *samp, *offset, *dpdx, *dpdy;
	struct tgsi_src_register zero;
	const struct target_info *tgt = &tex_targets[inst->Texture.Texture];
	struct tex_info tinf;
	int i;

	memset(&tinf, 0, sizeof(tinf));
	fill_tex_info(ctx, inst, &tinf);
	coord = get_tex_coord(ctx, inst, &tinf);
	get_immediate(ctx, &zero, 0);

	switch (inst->Instruction.Opcode) {
	case TGSI_OPCODE_TXB2:
		orig = &inst->Src[1].Register;
		samp = &inst->Src[2].Register;
		break;
	case TGSI_OPCODE_TXD:
		orig = &inst->Src[0].Register;
		dpdx = &inst->Src[1].Register;
		dpdy = &inst->Src[2].Register;
		samp = &inst->Src[3].Register;
		if (is_rel_or_const(dpdx))
			dpdx = get_unconst(ctx, dpdx);
		if (is_rel_or_const(dpdy))
			dpdy = get_unconst(ctx, dpdy);
		break;
	default:
		orig = &inst->Src[0].Register;
		samp = &inst->Src[1].Register;
		break;
	}
	if (tinf.args > 1 && is_rel_or_const(orig))
		orig = get_unconst(ctx, orig);

	/* scale up integer coords for TXF based on the LOD */
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
		struct tgsi_dst_register tmp_dst;
		struct tgsi_src_register *tmp_src;
		type_t type_mov = get_utype(ctx);

		tmp_src = get_internal_temp(ctx, &tmp_dst);
		for (i = 0; i < tgt->dims; i++) {
			instr = instr_create(ctx, 2, OPC_SHL_B);
			add_dst_reg(ctx, instr, &tmp_dst, i);
			add_src_reg(ctx, instr, coord, src_swiz(coord, i));
			add_src_reg(ctx, instr, orig, orig->SwizzleW);
		}
		if (tgt->dims < 2) {
			instr = instr_create(ctx, 1, 0);
			instr->cat1.src_type = type_mov;
			instr->cat1.dst_type = type_mov;
			add_dst_reg(ctx, instr, &tmp_dst, i);
			add_src_reg(ctx, instr, &zero, 0);
			i++;
		}
		if (tgt->array) {
			instr = instr_create(ctx, 1, 0);
			instr->cat1.src_type = type_mov;
			instr->cat1.dst_type = type_mov;
			add_dst_reg(ctx, instr, &tmp_dst, i);
			add_src_reg(ctx, instr, coord, src_swiz(coord, i));
		}
		coord = tmp_src;
	}

	if (inst->Texture.NumOffsets) {
		struct tgsi_texture_offset *tex_offset = &inst->TexOffsets[0];
		struct tgsi_src_register offset_src = {0};

		offset_src.File = tex_offset->File;
		offset_src.Index = tex_offset->Index;
		offset_src.SwizzleX = tex_offset->SwizzleX;
		offset_src.SwizzleY = tex_offset->SwizzleY;
		offset_src.SwizzleZ = tex_offset->SwizzleZ;
		offset = get_unconst(ctx, &offset_src);
		tinf.flags |= IR3_INSTR_O;
	}

	instr = instr_create(ctx, 5, t->opc);
	if (ctx->integer_s & (1 << samp->Index))
		instr->cat5.type = get_utype(ctx);
	else
		instr->cat5.type = get_ftype(ctx);
	instr->cat5.samp = samp->Index;
	instr->cat5.tex = samp->Index;
	instr->flags |= tinf.flags;

	add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask);

	reg = ir3_reg_create(instr, 0, IR3_REG_SSA);

	collect = ir3_instr_create2(ctx->block, -1, OPC_META_FI, 12);
	ir3_reg_create(collect, 0, 0);
	for (i = 0; i < 4; i++) {
		if (tinf.src_wrmask & (1 << i))
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
					coord, src_swiz(coord, i));
		else if (tinf.src_wrmask & ~((1 << i) - 1))
			ir3_reg_create(collect, 0, 0);
	}

	/* Attach derivatives onto the end of the fan-in.  Derivatives start after
	 * the 4th argument, so make sure that fi is padded up to 4 first.
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
		while (collect->regs_count < 5)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
		for (i = 0; i < tgt->dims; i++)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), dpdx, i);
		if (tgt->dims < 2)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
		for (i = 0; i < tgt->dims; i++)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), dpdy, i);
		if (tgt->dims < 2)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
		tinf.src_wrmask |= ((1 << (2 * MAX2(tgt->dims, 2))) - 1) << 4;
	}

	reg->instr = collect;
	reg->wrmask = tinf.src_wrmask;

	/* The second argument contains the offsets, followed by the lod/bias
	 * argument.  This is constructed more manually due to the dynamic nature.
	 */
	if (inst->Texture.NumOffsets == 0 && tinf.args == 1)
		return;

	reg = ir3_reg_create(instr, 0, IR3_REG_SSA);

	collect = ir3_instr_create2(ctx->block, -1, OPC_META_FI, 5);
	ir3_reg_create(collect, 0, 0);

	if (inst->Texture.NumOffsets) {
		for (i = 0; i < tgt->dims; i++)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
					offset, i);
		if (tgt->dims < 2)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
	}
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2)
		ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
				orig, orig->SwizzleX);
	else if (tinf.args > 1)
		ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
				orig, orig->SwizzleW);

	reg->instr = collect;
	reg->wrmask = (1 << (collect->regs_count - 1)) - 1;
}

static void
trans_txq(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *level = &inst->Src[0].Register;
	struct tgsi_src_register *samp = &inst->Src[1].Register;
	struct tex_info tinf;

	memset(&tinf, 0, sizeof(tinf));
	fill_tex_info(ctx, inst, &tinf);
	if (is_rel_or_const(level))
		level = get_unconst(ctx, level);

	instr = instr_create(ctx, 5, OPC_GETSIZE);
	instr->cat5.type = get_utype(ctx);
	instr->cat5.samp = samp->Index;
	instr->cat5.tex = samp->Index;
	instr->flags |= tinf.flags;

	add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask);
	add_src_reg_wrmask(ctx, instr, level, level->SwizzleX, 0x1);
}

/* DDX/DDY */
static void
trans_deriv(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *src = &inst->Src[0].Register;
	static const int8_t order[4] = {0, 1, 2, 3};

	if (!check_swiz(src, order)) {
		struct tgsi_dst_register tmp_dst;
		struct tgsi_src_register *tmp_src;

		tmp_src = get_internal_temp(ctx, &tmp_dst);
		create_mov(ctx, &tmp_dst, src);

		src = tmp_src;
	}

	/* This might be a workaround for hw bug?  Blob compiler always
	 * seems to work two components at a time for dsy/dsx.  It does
	 * actually seem to work in some cases (or at least some piglit
	 * tests) for four components at a time.  But seems more reliable
	 * to split this into two instructions like the blob compiler
	 * does:
	 */

	instr = instr_create(ctx, 5, t->opc);
	instr->cat5.type = get_ftype(ctx);
	add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask & 0x3);
	add_src_reg_wrmask(ctx, instr, src, 0, dst->WriteMask & 0x3);

	instr = instr_create(ctx, 5, t->opc);
	instr->cat5.type = get_ftype(ctx);
	add_dst_reg_wrmask(ctx, instr, dst, 2, (dst->WriteMask >> 2) & 0x3);
	add_src_reg_wrmask(ctx, instr, src, 2, (dst->WriteMask >> 2) & 0x3);
}

/*
 * SEQ(a,b) = (a == b) ? 1.0 : 0.0
 *   cmps.f.eq tmp0, a, b
 *   cov.u16f16 dst, tmp0
 *
 * SNE(a,b) = (a != b) ? 1.0 : 0.0
 *   cmps.f.ne tmp0, a, b
 *   cov.u16f16 dst, tmp0
 *
 * SGE(a,b) = (a >= b) ? 1.0 : 0.0
 *   cmps.f.ge tmp0, a, b
 *   cov.u16f16 dst, tmp0
 *
 * SLE(a,b) = (a <= b) ? 1.0 : 0.0
 *   cmps.f.le tmp0, a, b
 *   cov.u16f16 dst, tmp0
 *
 * SGT(a,b) = (a > b) ? 1.0 : 0.0
 *   cmps.f.gt tmp0, a, b
 *   cov.u16f16 dst, tmp0
 *
 * SLT(a,b) = (a < b) ? 1.0 : 0.0
 *   cmps.f.lt tmp0, a, b
 *   cov.u16f16 dst, tmp0
 *
 * CMP(a,b,c) = (a < 0.0) ? b : c
 *   cmps.f.lt tmp0, a, {0.0}
 *   sel.b16 dst, b, tmp0, c
 */
static void
trans_cmp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_src_register constval0;
	/* final instruction for CMP() uses orig src1 and src2: */
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *a0, *a1, *a2;
	unsigned condition;

	tmp_src = get_internal_temp(ctx, &tmp_dst);

	a0 = &inst->Src[0].Register;  /* a */
	a1 = &inst->Src[1].Register;  /* b */

	switch (t->tgsi_opc) {
	case TGSI_OPCODE_SEQ:
	case TGSI_OPCODE_FSEQ:
		condition = IR3_COND_EQ;
		break;
	case TGSI_OPCODE_SNE:
	case TGSI_OPCODE_FSNE:
		condition = IR3_COND_NE;
		break;
	case TGSI_OPCODE_SGE:
	case TGSI_OPCODE_FSGE:
		condition = IR3_COND_GE;
		break;
	case TGSI_OPCODE_SLT:
	case TGSI_OPCODE_FSLT:
		condition = IR3_COND_LT;
		break;
	case TGSI_OPCODE_SLE:
		condition = IR3_COND_LE;
		break;
	case TGSI_OPCODE_SGT:
		condition = IR3_COND_GT;
		break;
	case TGSI_OPCODE_CMP:
		get_immediate(ctx, &constval0, fui(0.0));
		a0 = &inst->Src[0].Register;  /* a */
		a1 = &constval0;              /* {0.0} */
		condition = IR3_COND_LT;
		break;
	default:
		compile_assert(ctx, 0);
		return;
	}

	if (is_const(a0) && is_const(a1))
		a0 = get_unconst(ctx, a0);

	/* cmps.f.<cond> tmp, a0, a1 */
	instr = instr_create(ctx, 2, OPC_CMPS_F);
	instr->cat2.condition = condition;
	vectorize(ctx, instr, &tmp_dst, 2, a0, 0, a1, 0);

	switch (t->tgsi_opc) {
	case TGSI_OPCODE_SEQ:
	case TGSI_OPCODE_SGE:
	case TGSI_OPCODE_SLE:
	case TGSI_OPCODE_SNE:
	case TGSI_OPCODE_SGT:
	case TGSI_OPCODE_SLT:
		/* cov.u16f16 dst, tmp0 */
		instr = instr_create(ctx, 1, 0);
		instr->cat1.src_type = get_utype(ctx);
		instr->cat1.dst_type = get_ftype(ctx);
		vectorize(ctx, instr, dst, 1, tmp_src, 0);
		break;
	case TGSI_OPCODE_FSEQ:
	case TGSI_OPCODE_FSGE:
	case TGSI_OPCODE_FSNE:
	case TGSI_OPCODE_FSLT:
		/* absneg.s dst, (neg)tmp0 */
		instr = instr_create(ctx, 2, OPC_ABSNEG_S);
		vectorize(ctx, instr, dst, 1, tmp_src, IR3_REG_NEGATE);
		break;
	case TGSI_OPCODE_CMP:
		a1 = &inst->Src[1].Register;
		a2 = &inst->Src[2].Register;
		/* sel.{b32,b16} dst, src2, tmp, src1 */
		instr = instr_create(ctx, 3, OPC_SEL_B32);
		vectorize(ctx, instr, dst, 3, a1, 0, tmp_src, 0, a2, 0);

		break;
	}

	put_dst(ctx, inst, dst);
}

/*
 * USNE(a,b) = (a != b) ? ~0 : 0
 *   cmps.u32.ne dst, a, b
 *
 * USEQ(a,b) = (a == b) ? ~0 : 0
 *   cmps.u32.eq dst, a, b
 *
 * ISGE(a,b) = (a >= b) ? ~0 : 0
 *   cmps.s32.ge dst, a, b
 *
 * USGE(a,b) = (a >= b) ? ~0 : 0
 *   cmps.u32.ge dst, a, b
 *
 * ISLT(a,b) = (a < b) ? ~0 : 0
 *   cmps.s32.lt dst, a, b
 *
 * USLT(a,b) = (a < b) ? ~0 : 0
 *   cmps.u32.lt dst, a, b
 *
 */
static void
trans_icmp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_src_register *a0, *a1;
	unsigned condition;

	a0 = &inst->Src[0].Register;  /* a */
	a1 = &inst->Src[1].Register;  /* b */

	switch (t->tgsi_opc) {
	case TGSI_OPCODE_USNE:
		condition = IR3_COND_NE;
		break;
	case TGSI_OPCODE_USEQ:
		condition = IR3_COND_EQ;
		break;
	case TGSI_OPCODE_ISGE:
	case TGSI_OPCODE_USGE:
		condition = IR3_COND_GE;
		break;
	case TGSI_OPCODE_ISLT:
	case TGSI_OPCODE_USLT:
		condition = IR3_COND_LT;
		break;

	default:
		compile_assert(ctx, 0);
		return;
	}

	if (is_const(a0) && is_const(a1))
		a0 = get_unconst(ctx, a0);

	tmp_src = get_internal_temp(ctx, &tmp_dst);
	/* cmps.{u32,s32}.<cond> tmp, a0, a1 */
	instr = instr_create(ctx, 2, t->opc);
	instr->cat2.condition = condition;
	vectorize(ctx, instr, &tmp_dst, 2, a0, 0, a1, 0);

	/* absneg.s dst, (neg)tmp */
	instr = instr_create(ctx, 2, OPC_ABSNEG_S);
	vectorize(ctx, instr, dst, 1, tmp_src, IR3_REG_NEGATE);

	put_dst(ctx, inst, dst);
}

/*
 * UCMP(a,b,c) = a ? b : c
 *   sel.b16 dst, b, a, c
 */
static void
trans_ucmp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *a0, *a1, *a2;

	a0 = &inst->Src[0].Register;  /* a */
	a1 = &inst->Src[1].Register;  /* b */
	a2 = &inst->Src[2].Register;  /* c */

	if (is_rel_or_const(a0))
		a0 = get_unconst(ctx, a0);

	/* sel.{b32,b16} dst, b, a, c */
	instr = instr_create(ctx, 3, OPC_SEL_B32);
	vectorize(ctx, instr, dst, 3, a1, 0, a0, 0, a2, 0);
	put_dst(ctx, inst, dst);
}

/*
 * ISSG(a) = a < 0 ? -1 : a > 0 ? 1 : 0
 *   cmps.s.lt tmp_neg, a, 0  # 1 if a is negative
 *   cmps.s.gt tmp_pos, a, 0  # 1 if a is positive
 *   sub.u dst, tmp_pos, tmp_neg
 */
static void
trans_issg(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *a = &inst->Src[0].Register;
	struct tgsi_dst_register neg_dst, pos_dst;
	struct tgsi_src_register *neg_src, *pos_src;

	neg_src = get_internal_temp(ctx, &neg_dst);
	pos_src = get_internal_temp(ctx, &pos_dst);

	/* cmps.s.lt neg, a, 0 */
	instr = instr_create(ctx, 2, OPC_CMPS_S);
	instr->cat2.condition = IR3_COND_LT;
	vectorize(ctx, instr, &neg_dst, 2, a, 0, 0, IR3_REG_IMMED);

	/* cmps.s.gt pos, a, 0 */
	instr = instr_create(ctx, 2, OPC_CMPS_S);
	instr->cat2.condition = IR3_COND_GT;
	vectorize(ctx, instr, &pos_dst, 2, a, 0, 0, IR3_REG_IMMED);

	/* sub.u dst, pos, neg */
	instr = instr_create(ctx, 2, OPC_SUB_U);
	vectorize(ctx, instr, dst, 2, pos_src, 0, neg_src, 0);

	put_dst(ctx, inst, dst);
}



/*
 * Conditional / Flow control
 */

static void
push_branch(struct ir3_compile_context *ctx, bool inv,
		struct ir3_instruction *instr, struct ir3_instruction *cond)
{
	unsigned int idx = ctx->branch_count++;
	compile_assert(ctx, idx < ARRAY_SIZE(ctx->branch));
	ctx->branch[idx].instr = instr;
	ctx->branch[idx].inv = inv;
	/* else side of branch has same condition: */
	if (!inv)
		ctx->branch[idx].cond = cond;
}

static struct ir3_instruction *
pop_branch(struct ir3_compile_context *ctx)
{
	unsigned int idx = --ctx->branch_count;
	return ctx->branch[idx].instr;
}

static void
trans_if(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr, *cond;
	struct tgsi_src_register *src = &inst->Src[0].Register;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_src_register constval;

	get_immediate(ctx, &constval, fui(0.0));
	tmp_src = get_internal_temp(ctx, &tmp_dst);

	if (is_const(src))
		src = get_unconst(ctx, src);

	/* cmps.{f,u}.ne tmp0, b, {0.0} */
	instr = instr_create(ctx, 2, t->opc);
	add_dst_reg(ctx, instr, &tmp_dst, 0);
	add_src_reg(ctx, instr, src, src->SwizzleX);
	add_src_reg(ctx, instr, &constval, constval.SwizzleX);
	instr->cat2.condition = IR3_COND_NE;

	compile_assert(ctx, instr->regs[1]->flags & IR3_REG_SSA); /* because get_unconst() */
	cond = instr->regs[1]->instr;

	/* meta:flow tmp0 */
	instr = instr_create(ctx, -1, OPC_META_FLOW);
	ir3_reg_create(instr, 0, 0);  /* dummy dst */
	add_src_reg(ctx, instr, tmp_src, TGSI_SWIZZLE_X);

	push_branch(ctx, false, instr, cond);
	instr->flow.if_block = push_block(ctx);
}

static void
trans_else(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;

	pop_block(ctx);

	instr = pop_branch(ctx);

	compile_assert(ctx, (instr->category == -1) &&
			(instr->opc == OPC_META_FLOW));

	push_branch(ctx, true, instr, NULL);
	instr->flow.else_block = push_block(ctx);
}

static struct ir3_instruction *
find_temporary(struct ir3_block *block, unsigned n)
{
	if (block->parent && !block->temporaries[n])
		return find_temporary(block->parent, n);
	return block->temporaries[n];
}

static struct ir3_instruction *
find_output(struct ir3_block *block, unsigned n)
{
	if (block->parent && !block->outputs[n])
		return find_output(block->parent, n);
	return block->outputs[n];
}

static struct ir3_instruction *
create_phi(struct ir3_compile_context *ctx, struct ir3_instruction *cond,
		struct ir3_instruction *a, struct ir3_instruction *b)
{
	struct ir3_instruction *phi;

	compile_assert(ctx, cond);

	/* Either side of the condition could be null..  which
	 * indicates a variable written on only one side of the
	 * branch.  Normally this should only be variables not
	 * used outside of that side of the branch.  So we could
	 * just 'return a ? a : b;' in that case.  But for better
	 * defined undefined behavior we just stick in imm{0.0}.
	 * In the common case of a value only used within the
	 * one side of the branch, the PHI instruction will not
	 * get scheduled
	 */
	if (!a)
		a = create_immed(ctx, 0.0);
	if (!b)
		b = create_immed(ctx, 0.0);

	phi = instr_create(ctx, -1, OPC_META_PHI);
	ir3_reg_create(phi, 0, 0);  /* dummy dst */
	ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = cond;
	ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = a;
	ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = b;
1980
1981 return phi;
1982 }
1983
1984 static void
1985 trans_endif(const struct instr_translater *t,
1986 struct ir3_compile_context *ctx,
1987 struct tgsi_full_instruction *inst)
1988 {
1989 struct ir3_instruction *instr;
1990 struct ir3_block *ifb, *elseb;
1991 struct ir3_instruction **ifout, **elseout;
1992 unsigned i, ifnout = 0, elsenout = 0;
1993
1994 pop_block(ctx);
1995
1996 instr = pop_branch(ctx);
1997
1998 compile_assert(ctx, (instr->category == -1) &&
1999 (instr->opc == OPC_META_FLOW));
2000
2001 ifb = instr->flow.if_block;
2002 elseb = instr->flow.else_block;
2003 /* if there is no else block, the parent block is used for the
2004 * branch-not-taken src of the PHI instructions:
2005 */
2006 if (!elseb)
2007 elseb = ifb->parent;
2008
2009 /* worst case sizes: */
2010 ifnout = ifb->ntemporaries + ifb->noutputs;
2011 elsenout = elseb->ntemporaries + elseb->noutputs;
2012
2013 ifout = ir3_alloc(ctx->ir, sizeof(ifb->outputs[0]) * ifnout);
2014 if (elseb != ifb->parent)
2015 elseout = ir3_alloc(ctx->ir, sizeof(ifb->outputs[0]) * elsenout);
2016
2017 ifnout = 0;
2018 elsenout = 0;
2019
2020 /* generate PHI instructions for any temporaries written: */
2021 for (i = 0; i < ifb->ntemporaries; i++) {
2022 struct ir3_instruction *a = ifb->temporaries[i];
2023 struct ir3_instruction *b = elseb->temporaries[i];
2024
2025 /* if temporary written in if-block, or if else block
2026 * is present and temporary written in else-block:
2027 */
2028 if (a || ((elseb != ifb->parent) && b)) {
2029 struct ir3_instruction *phi;
2030
2031 /* if only written on one side, find the closest
2032 * enclosing update on other side:
2033 */
2034 if (!a)
2035 a = find_temporary(ifb, i);
2036 if (!b)
2037 b = find_temporary(elseb, i);
2038
2039 ifout[ifnout] = a;
2040 a = create_output(ifb, a, ifnout++);
2041
2042 if (elseb != ifb->parent) {
2043 elseout[elsenout] = b;
2044 b = create_output(elseb, b, elsenout++);
2045 }
2046
2047 phi = create_phi(ctx, instr, a, b);
2048 ctx->block->temporaries[i] = phi;
2049 }
2050 }
2051
2052 compile_assert(ctx, ifb->noutputs == elseb->noutputs);
2053
2054 /* .. and any outputs written: */
2055 for (i = 0; i < ifb->noutputs; i++) {
2056 struct ir3_instruction *a = ifb->outputs[i];
2057 struct ir3_instruction *b = elseb->outputs[i];
2058
2059 /* if output written in if-block, or if else block
2060 * is present and output written in else-block:
2061 */
2062 if (a || ((elseb != ifb->parent) && b)) {
2063 struct ir3_instruction *phi;
2064
2065 /* if only written on one side, find the closest
2066 * enclosing update on other side:
2067 */
2068 if (!a)
2069 a = find_output(ifb, i);
2070 if (!b)
2071 b = find_output(elseb, i);
2072
2073 ifout[ifnout] = a;
2074 a = create_output(ifb, a, ifnout++);
2075
2076 if (elseb != ifb->parent) {
2077 elseout[elsenout] = b;
2078 b = create_output(elseb, b, elsenout++);
2079 }
2080
2081 phi = create_phi(ctx, instr, a, b);
2082 ctx->block->outputs[i] = phi;
2083 }
2084 }
2085
2086 ifb->noutputs = ifnout;
2087 ifb->outputs = ifout;
2088
2089 if (elseb != ifb->parent) {
2090 elseb->noutputs = elsenout;
2091 elseb->outputs = elseout;
2092 }
2093
2094 // TODO maybe we want to compact block->inputs?
2095 }
2096
2097 /*
2098 * Kill
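 *
 * TGSI KILL is unconditional, but when it occurs inside an if/else it
 * should only apply to fragments that actually took that branch, so
 * the kill is predicated on the innermost enclosing branch condition
 * (or on a constant-true condition at top level).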
2099 */
2100
2101 static void
2102 trans_kill(const struct instr_translater *t,
2103 struct ir3_compile_context *ctx,
2104 struct tgsi_full_instruction *inst)
2105 {
2106 struct ir3_instruction *instr, *immed, *cond = NULL;
2107 bool inv = false;
2108
2109 /* unconditional kill, use enclosing if condition: */
2110 if (ctx->branch_count > 0) {
2111 unsigned int idx = ctx->branch_count - 1;
2112 cond = ctx->branch[idx].cond;
2113 inv = ctx->branch[idx].inv;
2114 } else {
2115 cond = create_immed(ctx, 1.0);
2116 }
2117
2118 compile_assert(ctx, cond);
2119
2120 immed = create_immed(ctx, 0.0);
2121
2122 /* cmps.f.ne p0.x, cond, {0.0} */
2123 instr = instr_create(ctx, 2, OPC_CMPS_F);
2124 instr->cat2.condition = IR3_COND_NE;
2125 ir3_reg_create(instr, regid(REG_P0, 0), 0);
2126 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
2127 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = immed;
2128 cond = instr;
2129
2130 /* kill p0.x */
2131 instr = instr_create(ctx, 0, OPC_KILL);
2132 instr->cat0.inv = inv;
2133 ir3_reg_create(instr, 0, 0); /* dummy dst */
2134 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
2135
2136 ctx->kill[ctx->kill_count++] = instr;
2137
2138 ctx->so->has_kill = true;
2139 }
2140
2141 /*
2142 * Kill-If
2143 */
2144
2145 static void
2146 trans_killif(const struct instr_translater *t,
2147 struct ir3_compile_context *ctx,
2148 struct tgsi_full_instruction *inst)
2149 {
2150 struct tgsi_src_register *src = &inst->Src[0].Register;
2151 struct ir3_instruction *instr, *immed, *cond = NULL;
2152 bool inv = false;
2153
2154 immed = create_immed(ctx, 0.0);
2155
2156 /* cmps.f.ne p0.x, cond, {0.0} */
2157 instr = instr_create(ctx, 2, OPC_CMPS_F);
2158 instr->cat2.condition = IR3_COND_NE;
2159 ir3_reg_create(instr, regid(REG_P0, 0), 0);
2160 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = immed;
2161 add_src_reg(ctx, instr, src, src->SwizzleX);
2162
2163 cond = instr;
2164
2165 /* kill p0.x */
2166 instr = instr_create(ctx, 0, OPC_KILL);
2167 instr->cat0.inv = inv;
2168 ir3_reg_create(instr, 0, 0); /* dummy dst */
2169 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
2170
2171 ctx->kill[ctx->kill_count++] = instr;
2172
2173 ctx->so->has_kill = true;
2174 }
2175
2176 /*
2177 * I2F / U2F / F2I / F2U
2178 */
2179
2180 static void
2181 trans_cov(const struct instr_translater *t,
2182 struct ir3_compile_context *ctx,
2183 struct tgsi_full_instruction *inst)
2184 {
2185 struct ir3_instruction *instr;
2186 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2187 struct tgsi_src_register *src = &inst->Src[0].Register;
2188
2189 	// cov.f32s32 dst, tmp0
2190 instr = instr_create(ctx, 1, 0);
2191 switch (t->tgsi_opc) {
2192 case TGSI_OPCODE_U2F:
2193 instr->cat1.src_type = TYPE_U32;
2194 instr->cat1.dst_type = TYPE_F32;
2195 break;
2196 case TGSI_OPCODE_I2F:
2197 instr->cat1.src_type = TYPE_S32;
2198 instr->cat1.dst_type = TYPE_F32;
2199 break;
2200 case TGSI_OPCODE_F2U:
2201 instr->cat1.src_type = TYPE_F32;
2202 instr->cat1.dst_type = TYPE_U32;
2203 break;
2204 case TGSI_OPCODE_F2I:
2205 instr->cat1.src_type = TYPE_F32;
2206 instr->cat1.dst_type = TYPE_S32;
2207 break;
2208
2209 }
2210 vectorize(ctx, instr, dst, 1, src, 0);
2211 put_dst(ctx, inst, dst);
2212 }
2213
2214 /*
2215 * UMUL / UMAD
2216 *
2217 * There is no 32-bit multiply instruction, so splitting a and b into high and
2218 * low components, we get that
2219 *
2220 * dst = al * bl + ah * bl << 16 + al * bh << 16
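 * = (a * b) mod 2^32, since with a = (ah << 16) + al and
 * b = (bh << 16) + bl the full product is
 * al*bl + ((ah*bl + al*bh) << 16) + ((ah*bh) << 32), and the last
 * term vanishes mod 2^32. e.g. a=0x00020003, b=0x00040005:
 * 0x0000000f + 0x000a0000 + 0x000c0000 = 0x0016000f == (a*b) & 0xffffffff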
2221 *
2222 * mull.u tmp0, a, b (mul low, i.e. al * bl)
2223 * madsh.m16 tmp1, a, b, tmp0 (mul-add shift high mix, i.e. ah * bl << 16)
2224 * madsh.m16 dst, b, a, tmp1 (i.e. al * bh << 16)
2225 *
2226 * For UMAD, add in the extra argument after mull.u.
2227 */
2228 static void
2229 trans_umul(const struct instr_translater *t,
2230 struct ir3_compile_context *ctx,
2231 struct tgsi_full_instruction *inst)
2232 {
2233 struct ir3_instruction *instr;
2234 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2235 struct tgsi_src_register *a = &inst->Src[0].Register;
2236 struct tgsi_src_register *b = &inst->Src[1].Register;
2237
2238 struct tgsi_dst_register tmp0_dst, tmp1_dst;
2239 struct tgsi_src_register *tmp0_src, *tmp1_src;
2240
2241 tmp0_src = get_internal_temp(ctx, &tmp0_dst);
2242 tmp1_src = get_internal_temp(ctx, &tmp1_dst);
2243
2244 if (is_rel_or_const(a))
2245 a = get_unconst(ctx, a);
2246 if (is_rel_or_const(b))
2247 b = get_unconst(ctx, b);
2248
2249 /* mull.u tmp0, a, b */
2250 instr = instr_create(ctx, 2, OPC_MULL_U);
2251 vectorize(ctx, instr, &tmp0_dst, 2, a, 0, b, 0);
2252
2253 if (t->tgsi_opc == TGSI_OPCODE_UMAD) {
2254 struct tgsi_src_register *c = &inst->Src[2].Register;
2255
2256 /* add.u tmp0, tmp0, c */
2257 instr = instr_create(ctx, 2, OPC_ADD_U);
2258 vectorize(ctx, instr, &tmp0_dst, 2, tmp0_src, 0, c, 0);
2259 }
2260
2261 /* madsh.m16 tmp1, a, b, tmp0 */
2262 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2263 vectorize(ctx, instr, &tmp1_dst, 3, a, 0, b, 0, tmp0_src, 0);
2264
2265 /* madsh.m16 dst, b, a, tmp1 */
2266 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2267 vectorize(ctx, instr, dst, 3, b, 0, a, 0, tmp1_src, 0);
2268 put_dst(ctx, inst, dst);
2269 }
2270
2271 /*
2272 * IDIV / UDIV / MOD / UMOD
2273 *
2274 * See NV50LegalizeSSA::handleDIV for the origin of this implementation. For
2275 * MOD/UMOD, it becomes a - [IU]DIV(a, modulus) * modulus.
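 *
 * Sketch of the sequence below: convert |a| and |b| to float, take
 * rcp(bf) biased down by two ulps so the estimate never overshoots,
 * q = trunc(af * bf), then refine twice with the integer remainder
 * r = a - q*b: first q += trunc(r * bf), then q += 1 if r >= b still
 * holds. For the signed case, the result is negated when the signs
 * of a and b differ.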
2276 */
2277 static void
2278 trans_idiv(const struct instr_translater *t,
2279 struct ir3_compile_context *ctx,
2280 struct tgsi_full_instruction *inst)
2281 {
2282 struct ir3_instruction *instr;
2283 struct tgsi_dst_register *dst = get_dst(ctx, inst), *premod_dst = dst;
2284 struct tgsi_src_register *a = &inst->Src[0].Register;
2285 struct tgsi_src_register *b = &inst->Src[1].Register;
2286
2287 struct tgsi_dst_register af_dst, bf_dst, q_dst, r_dst, a_dst, b_dst;
2288 struct tgsi_src_register *af_src, *bf_src, *q_src, *r_src, *a_src, *b_src;
2289
2290 struct tgsi_src_register negative_2, thirty_one;
2291 type_t src_type;
2292
2293 if (t->tgsi_opc == TGSI_OPCODE_IDIV || t->tgsi_opc == TGSI_OPCODE_MOD)
2294 src_type = get_stype(ctx);
2295 else
2296 src_type = get_utype(ctx);
2297
2298 af_src = get_internal_temp(ctx, &af_dst);
2299 bf_src = get_internal_temp(ctx, &bf_dst);
2300 q_src = get_internal_temp(ctx, &q_dst);
2301 r_src = get_internal_temp(ctx, &r_dst);
2302 a_src = get_internal_temp(ctx, &a_dst);
2303 b_src = get_internal_temp(ctx, &b_dst);
2304
2305 get_immediate(ctx, &negative_2, -2);
2306 get_immediate(ctx, &thirty_one, 31);
2307
2308 if (t->tgsi_opc == TGSI_OPCODE_MOD || t->tgsi_opc == TGSI_OPCODE_UMOD)
2309 premod_dst = &q_dst;
2310
2311 /* cov.[us]32f32 af, numerator */
2312 instr = instr_create(ctx, 1, 0);
2313 instr->cat1.src_type = src_type;
2314 instr->cat1.dst_type = get_ftype(ctx);
2315 vectorize(ctx, instr, &af_dst, 1, a, 0);
2316
2317 /* cov.[us]32f32 bf, denominator */
2318 instr = instr_create(ctx, 1, 0);
2319 instr->cat1.src_type = src_type;
2320 instr->cat1.dst_type = get_ftype(ctx);
2321 vectorize(ctx, instr, &bf_dst, 1, b, 0);
2322
2323 /* Get the absolute values for IDIV */
2324 if (type_sint(src_type)) {
2325 /* absneg.f af, (abs)af */
2326 instr = instr_create(ctx, 2, OPC_ABSNEG_F);
2327 vectorize(ctx, instr, &af_dst, 1, af_src, IR3_REG_ABS);
2328
2329 /* absneg.f bf, (abs)bf */
2330 instr = instr_create(ctx, 2, OPC_ABSNEG_F);
2331 vectorize(ctx, instr, &bf_dst, 1, bf_src, IR3_REG_ABS);
2332
2333 /* absneg.s a, (abs)numerator */
2334 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
2335 vectorize(ctx, instr, &a_dst, 1, a, IR3_REG_ABS);
2336
2337 /* absneg.s b, (abs)denominator */
2338 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
2339 vectorize(ctx, instr, &b_dst, 1, b, IR3_REG_ABS);
2340 } else {
2341 /* mov.u32u32 a, numerator */
2342 instr = instr_create(ctx, 1, 0);
2343 instr->cat1.src_type = src_type;
2344 instr->cat1.dst_type = src_type;
2345 vectorize(ctx, instr, &a_dst, 1, a, 0);
2346
2347 /* mov.u32u32 b, denominator */
2348 instr = instr_create(ctx, 1, 0);
2349 instr->cat1.src_type = src_type;
2350 instr->cat1.dst_type = src_type;
2351 vectorize(ctx, instr, &b_dst, 1, b, 0);
2352 }
2353
2354 /* rcp.f bf, bf */
2355 instr = instr_create(ctx, 4, OPC_RCP);
2356 vectorize(ctx, instr, &bf_dst, 1, bf_src, 0);
2357
2358 /* That's right, subtract 2 as an integer from the float */
2359 /* add.u bf, bf, -2 */
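	/* (subtracting 2 from the float's bit pattern drops rcp's result
	 * by two ulps, guaranteeing an under-estimate which the integer
	 * fixups below then correct)
	 */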
2360 instr = instr_create(ctx, 2, OPC_ADD_U);
2361 vectorize(ctx, instr, &bf_dst, 2, bf_src, 0, &negative_2, 0);
2362
2363 /* mul.f q, af, bf */
2364 instr = instr_create(ctx, 2, OPC_MUL_F);
2365 vectorize(ctx, instr, &q_dst, 2, af_src, 0, bf_src, 0);
2366
2367 /* cov.f32[us]32 q, q */
2368 instr = instr_create(ctx, 1, 0);
2369 instr->cat1.src_type = get_ftype(ctx);
2370 instr->cat1.dst_type = src_type;
2371 vectorize(ctx, instr, &q_dst, 1, q_src, 0);
2372
2373 /* integer multiply q by b */
2374 /* mull.u r, q, b */
2375 instr = instr_create(ctx, 2, OPC_MULL_U);
2376 vectorize(ctx, instr, &r_dst, 2, q_src, 0, b_src, 0);
2377
2378 /* madsh.m16 r, q, b, r */
2379 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2380 vectorize(ctx, instr, &r_dst, 3, q_src, 0, b_src, 0, r_src, 0);
2381
2382 	/* madsh.m16 r, b, q, r */
2383 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2384 vectorize(ctx, instr, &r_dst, 3, b_src, 0, q_src, 0, r_src, 0);
2385
2386 /* sub.u r, a, r */
2387 instr = instr_create(ctx, 2, OPC_SUB_U);
2388 vectorize(ctx, instr, &r_dst, 2, a_src, 0, r_src, 0);
2389
2390 	/* cov.u32f32 r, r */
2391 instr = instr_create(ctx, 1, 0);
2392 instr->cat1.src_type = get_utype(ctx);
2393 instr->cat1.dst_type = get_ftype(ctx);
2394 vectorize(ctx, instr, &r_dst, 1, r_src, 0);
2395
2396 /* mul.f r, r, bf */
2397 instr = instr_create(ctx, 2, OPC_MUL_F);
2398 vectorize(ctx, instr, &r_dst, 2, r_src, 0, bf_src, 0);
2399
2400 /* cov.f32u32 r, r */
2401 instr = instr_create(ctx, 1, 0);
2402 instr->cat1.src_type = get_ftype(ctx);
2403 instr->cat1.dst_type = get_utype(ctx);
2404 vectorize(ctx, instr, &r_dst, 1, r_src, 0);
2405
2406 /* add.u q, q, r */
2407 instr = instr_create(ctx, 2, OPC_ADD_U);
2408 vectorize(ctx, instr, &q_dst, 2, q_src, 0, r_src, 0);
2409
2410 /* mull.u r, q, b */
2411 instr = instr_create(ctx, 2, OPC_MULL_U);
2412 vectorize(ctx, instr, &r_dst, 2, q_src, 0, b_src, 0);
2413
2414 /* madsh.m16 r, q, b, r */
2415 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2416 vectorize(ctx, instr, &r_dst, 3, q_src, 0, b_src, 0, r_src, 0);
2417
2418 /* madsh.m16 r, b, q, r */
2419 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2420 vectorize(ctx, instr, &r_dst, 3, b_src, 0, q_src, 0, r_src, 0);
2421
2422 /* sub.u r, a, r */
2423 instr = instr_create(ctx, 2, OPC_SUB_U);
2424 vectorize(ctx, instr, &r_dst, 2, a_src, 0, r_src, 0);
2425
2426 /* cmps.u.ge r, r, b */
2427 instr = instr_create(ctx, 2, OPC_CMPS_U);
2428 instr->cat2.condition = IR3_COND_GE;
2429 vectorize(ctx, instr, &r_dst, 2, r_src, 0, b_src, 0);
2430
2431 if (type_uint(src_type)) {
2432 /* add.u dst, q, r */
2433 instr = instr_create(ctx, 2, OPC_ADD_U);
2434 vectorize(ctx, instr, premod_dst, 2, q_src, 0, r_src, 0);
2435 } else {
2436 /* add.u q, q, r */
2437 instr = instr_create(ctx, 2, OPC_ADD_U);
2438 vectorize(ctx, instr, &q_dst, 2, q_src, 0, r_src, 0);
2439
2440 /* negate result based on the original arguments */
2441 if (is_const(a) && is_const(b))
2442 a = get_unconst(ctx, a);
2443
2444 /* xor.b r, numerator, denominator */
2445 instr = instr_create(ctx, 2, OPC_XOR_B);
2446 vectorize(ctx, instr, &r_dst, 2, a, 0, b, 0);
2447
2448 /* shr.b r, r, 31 */
2449 instr = instr_create(ctx, 2, OPC_SHR_B);
2450 vectorize(ctx, instr, &r_dst, 2, r_src, 0, &thirty_one, 0);
2451
2452 /* absneg.s b, (neg)q */
2453 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
2454 vectorize(ctx, instr, &b_dst, 1, q_src, IR3_REG_NEGATE);
2455
2456 /* sel.b dst, b, r, q */
2457 instr = instr_create(ctx, 3, OPC_SEL_B32);
2458 vectorize(ctx, instr, premod_dst, 3, b_src, 0, r_src, 0, q_src, 0);
2459 }
2460
2461 if (t->tgsi_opc == TGSI_OPCODE_MOD || t->tgsi_opc == TGSI_OPCODE_UMOD) {
2462 /* The division result will have ended up in q. */
2463
2464 if (is_rel_or_const(b))
2465 b = get_unconst(ctx, b);
2466
2467 /* mull.u r, q, b */
2468 instr = instr_create(ctx, 2, OPC_MULL_U);
2469 vectorize(ctx, instr, &r_dst, 2, q_src, 0, b, 0);
2470
2471 /* madsh.m16 r, q, b, r */
2472 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2473 vectorize(ctx, instr, &r_dst, 3, q_src, 0, b, 0, r_src, 0);
2474
2475 /* madsh.m16 r, b, q, r */
2476 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2477 vectorize(ctx, instr, &r_dst, 3, b, 0, q_src, 0, r_src, 0);
2478
2479 /* sub.u dst, a, r */
2480 instr = instr_create(ctx, 2, OPC_SUB_U);
2481 vectorize(ctx, instr, dst, 2, a, 0, r_src, 0);
2482 }
2483
2484 put_dst(ctx, inst, dst);
2485 }
2486
2487 /*
2488 * Handlers for TGSI instructions which do have 1:1 mapping to native
2489 * instructions:
2490 */
2491
2492 static void
2493 instr_cat0(const struct instr_translater *t,
2494 struct ir3_compile_context *ctx,
2495 struct tgsi_full_instruction *inst)
2496 {
2497 instr_create(ctx, 0, t->opc);
2498 }
2499
2500 static void
2501 instr_cat1(const struct instr_translater *t,
2502 struct ir3_compile_context *ctx,
2503 struct tgsi_full_instruction *inst)
2504 {
2505 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2506 struct tgsi_src_register *src = &inst->Src[0].Register;
2507 create_mov(ctx, dst, src);
2508 put_dst(ctx, inst, dst);
2509 }
2510
2511 static void
2512 instr_cat2(const struct instr_translater *t,
2513 struct ir3_compile_context *ctx,
2514 struct tgsi_full_instruction *inst)
2515 {
2516 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2517 struct tgsi_src_register *src0 = &inst->Src[0].Register;
2518 struct tgsi_src_register *src1 = &inst->Src[1].Register;
2519 struct ir3_instruction *instr;
2520 unsigned src0_flags = 0, src1_flags = 0;
2521
2522 switch (t->tgsi_opc) {
2523 case TGSI_OPCODE_ABS:
2524 case TGSI_OPCODE_IABS:
2525 src0_flags = IR3_REG_ABS;
2526 break;
2527 case TGSI_OPCODE_INEG:
2528 src0_flags = IR3_REG_NEGATE;
2529 break;
2530 case TGSI_OPCODE_SUB:
2531 src1_flags = IR3_REG_NEGATE;
2532 break;
2533 }
2534
2535 switch (t->opc) {
2536 case OPC_ABSNEG_F:
2537 case OPC_ABSNEG_S:
2538 case OPC_CLZ_B:
2539 case OPC_CLZ_S:
2540 case OPC_SIGN_F:
2541 case OPC_FLOOR_F:
2542 case OPC_CEIL_F:
2543 case OPC_RNDNE_F:
2544 case OPC_RNDAZ_F:
2545 case OPC_TRUNC_F:
2546 case OPC_NOT_B:
2547 case OPC_BFREV_B:
2548 case OPC_SETRM:
2549 case OPC_CBITS_B:
2550 /* these only have one src reg */
2551 instr = instr_create(ctx, 2, t->opc);
2552 vectorize(ctx, instr, dst, 1, src0, src0_flags);
2553 break;
2554 default:
2555 if (is_const(src0) && is_const(src1))
2556 src0 = get_unconst(ctx, src0);
2557
2558 instr = instr_create(ctx, 2, t->opc);
2559 vectorize(ctx, instr, dst, 2, src0, src0_flags,
2560 src1, src1_flags);
2561 break;
2562 }
2563
2564 put_dst(ctx, inst, dst);
2565 }
2566
2567 static void
2568 instr_cat3(const struct instr_translater *t,
2569 struct ir3_compile_context *ctx,
2570 struct tgsi_full_instruction *inst)
2571 {
2572 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2573 struct tgsi_src_register *src0 = &inst->Src[0].Register;
2574 struct tgsi_src_register *src1 = &inst->Src[1].Register;
2575 struct ir3_instruction *instr;
2576
2577 /* in particular, can't handle const for src1 for cat3..
2578 * for mad, we can swap first two src's if needed:
2579 */
2580 if (is_rel_or_const(src1)) {
2581 if (is_mad(t->opc) && !is_rel_or_const(src0)) {
2582 struct tgsi_src_register *tmp;
2583 tmp = src0;
2584 src0 = src1;
2585 src1 = tmp;
2586 } else {
2587 src1 = get_unconst(ctx, src1);
2588 }
2589 }
2590
2591 instr = instr_create(ctx, 3, t->opc);
2592 vectorize(ctx, instr, dst, 3, src0, 0, src1, 0,
2593 &inst->Src[2].Register, 0);
2594 put_dst(ctx, inst, dst);
2595 }
2596
2597 static void
2598 instr_cat4(const struct instr_translater *t,
2599 struct ir3_compile_context *ctx,
2600 struct tgsi_full_instruction *inst)
2601 {
2602 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2603 struct tgsi_src_register *src = &inst->Src[0].Register;
2604 struct ir3_instruction *instr;
2605 unsigned i;
2606
2607 /* seems like blob compiler avoids const as src.. */
2608 if (is_const(src))
2609 src = get_unconst(ctx, src);
2610
2611 /* we need to replicate into each component: */
2612 for (i = 0; i < 4; i++) {
2613 if (dst->WriteMask & (1 << i)) {
2614 instr = instr_create(ctx, 4, t->opc);
2615 add_dst_reg(ctx, instr, dst, i);
2616 add_src_reg(ctx, instr, src, src->SwizzleX);
2617 }
2618 }
2619
2620 put_dst(ctx, inst, dst);
2621 }
2622
2623 static const struct instr_translater translaters[TGSI_OPCODE_LAST] = {
2624 #define INSTR(n, f, ...) \
2625 [TGSI_OPCODE_ ## n] = { .fxn = (f), .tgsi_opc = TGSI_OPCODE_ ## n, ##__VA_ARGS__ }
2626
2627 INSTR(MOV, instr_cat1),
2628 INSTR(RCP, instr_cat4, .opc = OPC_RCP),
2629 INSTR(RSQ, instr_cat4, .opc = OPC_RSQ),
2630 INSTR(SQRT, instr_cat4, .opc = OPC_SQRT),
2631 INSTR(MUL, instr_cat2, .opc = OPC_MUL_F),
2632 INSTR(ADD, instr_cat2, .opc = OPC_ADD_F),
2633 INSTR(SUB, instr_cat2, .opc = OPC_ADD_F),
2634 INSTR(MIN, instr_cat2, .opc = OPC_MIN_F),
2635 INSTR(MAX, instr_cat2, .opc = OPC_MAX_F),
2636 INSTR(UADD, instr_cat2, .opc = OPC_ADD_U),
2637 INSTR(IMIN, instr_cat2, .opc = OPC_MIN_S),
2638 INSTR(UMIN, instr_cat2, .opc = OPC_MIN_U),
2639 INSTR(IMAX, instr_cat2, .opc = OPC_MAX_S),
2640 INSTR(UMAX, instr_cat2, .opc = OPC_MAX_U),
2641 INSTR(AND, instr_cat2, .opc = OPC_AND_B),
2642 INSTR(OR, instr_cat2, .opc = OPC_OR_B),
2643 INSTR(NOT, instr_cat2, .opc = OPC_NOT_B),
2644 INSTR(XOR, instr_cat2, .opc = OPC_XOR_B),
2645 INSTR(UMUL, trans_umul),
2646 INSTR(UMAD, trans_umul),
2647 INSTR(UDIV, trans_idiv),
2648 INSTR(IDIV, trans_idiv),
2649 INSTR(MOD, trans_idiv),
2650 INSTR(UMOD, trans_idiv),
2651 INSTR(SHL, instr_cat2, .opc = OPC_SHL_B),
2652 INSTR(USHR, instr_cat2, .opc = OPC_SHR_B),
2653 INSTR(ISHR, instr_cat2, .opc = OPC_ASHR_B),
2654 INSTR(IABS, instr_cat2, .opc = OPC_ABSNEG_S),
2655 INSTR(INEG, instr_cat2, .opc = OPC_ABSNEG_S),
2657 INSTR(MAD, instr_cat3, .opc = OPC_MAD_F32, .hopc = OPC_MAD_F16),
2658 INSTR(TRUNC, instr_cat2, .opc = OPC_TRUNC_F),
2659 INSTR(CLAMP, trans_clamp),
2660 INSTR(FLR, instr_cat2, .opc = OPC_FLOOR_F),
2661 INSTR(ROUND, instr_cat2, .opc = OPC_RNDNE_F),
2662 INSTR(SSG, instr_cat2, .opc = OPC_SIGN_F),
2663 INSTR(CEIL, instr_cat2, .opc = OPC_CEIL_F),
2664 INSTR(ARL, trans_arl),
2665 INSTR(UARL, trans_arl),
2666 INSTR(EX2, instr_cat4, .opc = OPC_EXP2),
2667 INSTR(LG2, instr_cat4, .opc = OPC_LOG2),
2668 INSTR(ABS, instr_cat2, .opc = OPC_ABSNEG_F),
2669 INSTR(COS, instr_cat4, .opc = OPC_COS),
2670 INSTR(SIN, instr_cat4, .opc = OPC_SIN),
2671 INSTR(TEX, trans_samp, .opc = OPC_SAM),
2672 INSTR(TXP, trans_samp, .opc = OPC_SAM),
2673 INSTR(TXB, trans_samp, .opc = OPC_SAMB),
2674 INSTR(TXB2, trans_samp, .opc = OPC_SAMB),
2675 INSTR(TXL, trans_samp, .opc = OPC_SAML),
2676 INSTR(TXD, trans_samp, .opc = OPC_SAMGQ),
2677 INSTR(TXF, trans_samp, .opc = OPC_ISAML),
2678 INSTR(TXQ, trans_txq),
2679 INSTR(DDX, trans_deriv, .opc = OPC_DSX),
2680 INSTR(DDY, trans_deriv, .opc = OPC_DSY),
2681 INSTR(SGT, trans_cmp),
2682 INSTR(SLT, trans_cmp),
2683 INSTR(FSLT, trans_cmp),
2684 INSTR(SGE, trans_cmp),
2685 INSTR(FSGE, trans_cmp),
2686 INSTR(SLE, trans_cmp),
2687 INSTR(SNE, trans_cmp),
2688 INSTR(FSNE, trans_cmp),
2689 INSTR(SEQ, trans_cmp),
2690 INSTR(FSEQ, trans_cmp),
2691 INSTR(CMP, trans_cmp),
2692 INSTR(USNE, trans_icmp, .opc = OPC_CMPS_U),
2693 INSTR(USEQ, trans_icmp, .opc = OPC_CMPS_U),
2694 INSTR(ISGE, trans_icmp, .opc = OPC_CMPS_S),
2695 INSTR(USGE, trans_icmp, .opc = OPC_CMPS_U),
2696 INSTR(ISLT, trans_icmp, .opc = OPC_CMPS_S),
2697 INSTR(USLT, trans_icmp, .opc = OPC_CMPS_U),
2698 INSTR(UCMP, trans_ucmp),
2699 INSTR(ISSG, trans_issg),
2700 INSTR(IF, trans_if, .opc = OPC_CMPS_F),
2701 INSTR(UIF, trans_if, .opc = OPC_CMPS_U),
2702 INSTR(ELSE, trans_else),
2703 INSTR(ENDIF, trans_endif),
2704 INSTR(END, instr_cat0, .opc = OPC_END),
2705 INSTR(KILL, trans_kill, .opc = OPC_KILL),
2706 INSTR(KILL_IF, trans_killif, .opc = OPC_KILL),
2707 INSTR(I2F, trans_cov),
2708 INSTR(U2F, trans_cov),
2709 INSTR(F2I, trans_cov),
2710 INSTR(F2U, trans_cov),
2711 };
2712
2713 static ir3_semantic
2714 decl_semantic(const struct tgsi_declaration_semantic *sem)
2715 {
2716 return ir3_semantic_name(sem->Name, sem->Index);
2717 }
2718
2719 static struct ir3_instruction *
2720 decl_in_frag_bary(struct ir3_compile_context *ctx, unsigned regid,
2721 unsigned j, unsigned inloc)
2722 {
2723 struct ir3_instruction *instr;
2724 struct ir3_register *src;
2725
2726 /* bary.f dst, #inloc, r0.x */
2727 instr = instr_create(ctx, 2, OPC_BARY_F);
2728 ir3_reg_create(instr, regid, 0); /* dummy dst */
2729 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = inloc;
2730 src = ir3_reg_create(instr, 0, IR3_REG_SSA);
2731 src->wrmask = 0x3;
2732 src->instr = ctx->frag_pos;
2733
2734 return instr;
2735 }
2736
2737 /* TGSI_SEMANTIC_POSITION
2738 * """"""""""""""""""""""
2739 *
2740 * For fragment shaders, TGSI_SEMANTIC_POSITION is used to indicate that
2741 * fragment shader input contains the fragment's window position. The X
2742 * component starts at zero and always increases from left to right.
2743 * The Y component starts at zero and always increases but Y=0 may either
2744 * indicate the top of the window or the bottom depending on the fragment
2745 * coordinate origin convention (see TGSI_PROPERTY_FS_COORD_ORIGIN).
2746 * The Z coordinate ranges from 0 to 1 to represent depth from the front
2747  * to the back of the Z buffer. The W component contains the reciprocal
2748 * of the interpolated vertex position W component.
2749 */
2750 static struct ir3_instruction *
2751 decl_in_frag_coord(struct ir3_compile_context *ctx, unsigned regid,
2752 unsigned j)
2753 {
2754 struct ir3_instruction *instr, *src;
2755
2756 compile_assert(ctx, !ctx->frag_coord[j]);
2757
2758 ctx->frag_coord[j] = create_input(ctx->block, NULL, 0);
2759
2761 switch (j) {
2762 case 0: /* .x */
2763 case 1: /* .y */
2764 /* for frag_coord, we get unsigned values.. we need
2765 * to subtract (integer) 8 and divide by 16 (right-
2766 * shift by 4) then convert to float:
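		 *
		 * (presumably the raw value is the window coord in .4 fixed
		 * point, sampled at pixel centers, i.e. raw = 16*px + 8, so
		 * (raw - 8) >> 4 recovers the integer pixel coord px)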
2767 */
2768
2769 /* add.s tmp, src, -8 */
2770 instr = instr_create(ctx, 2, OPC_ADD_S);
2771 ir3_reg_create(instr, regid, 0); /* dummy dst */
2772 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->frag_coord[j];
2773 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = -8;
2774 src = instr;
2775
2776 /* shr.b tmp, tmp, 4 */
2777 instr = instr_create(ctx, 2, OPC_SHR_B);
2778 ir3_reg_create(instr, regid, 0); /* dummy dst */
2779 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2780 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 4;
2781 src = instr;
2782
2783 /* mov.u32f32 dst, tmp */
2784 instr = instr_create(ctx, 1, 0);
2785 instr->cat1.src_type = TYPE_U32;
2786 instr->cat1.dst_type = TYPE_F32;
2787 ir3_reg_create(instr, regid, 0); /* dummy dst */
2788 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2789
2790 break;
2791 case 2: /* .z */
2792 case 3: /* .w */
2793 /* seems that we can use these as-is: */
2794 instr = ctx->frag_coord[j];
2795 break;
2796 default:
2797 compile_error(ctx, "invalid channel\n");
2798 instr = create_immed(ctx, 0.0);
2799 break;
2800 }
2801
2802 return instr;
2803 }
2804
2805 /* TGSI_SEMANTIC_FACE
2806 * """"""""""""""""""
2807 *
2808 * This label applies to fragment shader inputs only and indicates that
2809 * the register contains front/back-face information of the form (F, 0,
2810 * 0, 1). The first component will be positive when the fragment belongs
2811 * to a front-facing polygon, and negative when the fragment belongs to a
2812 * back-facing polygon.
2813 */
2814 static struct ir3_instruction *
2815 decl_in_frag_face(struct ir3_compile_context *ctx, unsigned regid,
2816 unsigned j)
2817 {
2818 struct ir3_instruction *instr, *src;
2819
2820 switch (j) {
2821 case 0: /* .x */
2822 compile_assert(ctx, !ctx->frag_face);
2823
2824 ctx->frag_face = create_input(ctx->block, NULL, 0);
2825
2826 /* for faceness, we always get -1 or 0 (int).. but TGSI expects
2827 * positive vs negative float.. and piglit further seems to
2828 * expect -1.0 or 1.0:
2829 *
2830 * mul.s tmp, hr0.x, 2
2831 * add.s tmp, tmp, 1
2832 		 * mov.s16f32 dst, tmp
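		 * (so, assuming the hw gives 0 for front and -1 for back:
		 * front: 0*2+1 -> 1.0, back: -1*2+1 -> -1.0)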
2833 *
2834 */
2835
2836 instr = instr_create(ctx, 2, OPC_MUL_S);
2837 ir3_reg_create(instr, regid, 0); /* dummy dst */
2838 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->frag_face;
2839 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 2;
2840 src = instr;
2841
2842 instr = instr_create(ctx, 2, OPC_ADD_S);
2843 ir3_reg_create(instr, regid, 0); /* dummy dst */
2844 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2845 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 1;
2846 src = instr;
2847
2848 instr = instr_create(ctx, 1, 0); /* mov */
2849 instr->cat1.src_type = TYPE_S32;
2850 instr->cat1.dst_type = TYPE_F32;
2851 ir3_reg_create(instr, regid, 0); /* dummy dst */
2852 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2853
2854 break;
2855 case 1: /* .y */
2856 case 2: /* .z */
2857 instr = create_immed(ctx, 0.0);
2858 break;
2859 case 3: /* .w */
2860 instr = create_immed(ctx, 1.0);
2861 break;
2862 default:
2863 compile_error(ctx, "invalid channel\n");
2864 instr = create_immed(ctx, 0.0);
2865 break;
2866 }
2867
2868 return instr;
2869 }
2870
2871 static void
2872 decl_in(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
2873 {
2874 struct ir3_shader_variant *so = ctx->so;
2875 unsigned name = decl->Semantic.Name;
2876 unsigned i;
2877
2878 /* I don't think we should get frag shader input without
2879 * semantic info? Otherwise how do inputs get linked to
2880 * vert outputs?
2881 */
2882 compile_assert(ctx, (ctx->type == TGSI_PROCESSOR_VERTEX) ||
2883 decl->Declaration.Semantic);
2884
2885 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
2886 unsigned n = so->inputs_count++;
2887 unsigned r = regid(i, 0);
2888 unsigned ncomp, j;
2889
2890 /* we'll figure out the actual components used after scheduling */
2891 ncomp = 4;
2892
2893 DBG("decl in -> r%d", i);
2894
2895 compile_assert(ctx, n < ARRAY_SIZE(so->inputs));
2896
2897 so->inputs[n].semantic = decl_semantic(&decl->Semantic);
2898 so->inputs[n].compmask = (1 << ncomp) - 1;
2899 so->inputs[n].regid = r;
2900 so->inputs[n].inloc = ctx->next_inloc;
2901 so->inputs[n].interpolate = decl->Interp.Interpolate;
2902
2903 for (j = 0; j < ncomp; j++) {
2904 struct ir3_instruction *instr = NULL;
2905
2906 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
2907 /* for fragment shaders, POSITION and FACE are handled
2908 * specially, not using normal varying / bary.f
2909 */
2910 if (name == TGSI_SEMANTIC_POSITION) {
2911 so->inputs[n].bary = false;
2912 so->frag_coord = true;
2913 instr = decl_in_frag_coord(ctx, r + j, j);
2914 } else if (name == TGSI_SEMANTIC_FACE) {
2915 so->inputs[n].bary = false;
2916 so->frag_face = true;
2917 instr = decl_in_frag_face(ctx, r + j, j);
2918 } else {
2919 so->inputs[n].bary = true;
2920 instr = decl_in_frag_bary(ctx, r + j, j,
2921 so->inputs[n].inloc + j - 8);
2922 }
2923 } else {
2924 instr = create_input(ctx->block, NULL, (i * 4) + j);
2925 }
2926
2927 ctx->block->inputs[(i * 4) + j] = instr;
2928 }
2929
2930 if (so->inputs[n].bary || (ctx->type == TGSI_PROCESSOR_VERTEX)) {
2931 ctx->next_inloc += ncomp;
2932 so->total_in += ncomp;
2933 }
2934 }
2935 }
2936
2937 static void
2938 decl_out(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
2939 {
2940 struct ir3_shader_variant *so = ctx->so;
2941 unsigned comp = 0;
2942 unsigned name = decl->Semantic.Name;
2943 unsigned i;
2944
2945 compile_assert(ctx, decl->Declaration.Semantic);
2946
2947 DBG("decl out[%d] -> r%d", name, decl->Range.First);
2948
2949 if (ctx->type == TGSI_PROCESSOR_VERTEX) {
2950 switch (name) {
2951 case TGSI_SEMANTIC_POSITION:
2952 so->writes_pos = true;
2953 break;
2954 case TGSI_SEMANTIC_PSIZE:
2955 so->writes_psize = true;
2956 break;
2957 case TGSI_SEMANTIC_COLOR:
2958 case TGSI_SEMANTIC_BCOLOR:
2959 case TGSI_SEMANTIC_GENERIC:
2960 case TGSI_SEMANTIC_FOG:
2961 case TGSI_SEMANTIC_TEXCOORD:
2962 break;
2963 default:
2964 compile_error(ctx, "unknown VS semantic name: %s\n",
2965 tgsi_semantic_names[name]);
2966 }
2967 } else {
2968 switch (name) {
2969 case TGSI_SEMANTIC_POSITION:
2970 comp = 2; /* tgsi will write to .z component */
2971 so->writes_pos = true;
2972 break;
2973 case TGSI_SEMANTIC_COLOR:
2974 break;
2975 default:
2976 compile_error(ctx, "unknown FS semantic name: %s\n",
2977 tgsi_semantic_names[name]);
2978 }
2979 }
2980
2981 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
2982 unsigned n = so->outputs_count++;
2983 unsigned ncomp, j;
2984
2985 ncomp = 4;
2986
2987 compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
2988
2989 so->outputs[n].semantic = decl_semantic(&decl->Semantic);
2990 so->outputs[n].regid = regid(i, comp);
2991
2992 /* avoid undefined outputs, stick a dummy mov from imm{0.0},
2993 * which if the output is actually assigned will be over-
2994 * written
2995 */
2996 for (j = 0; j < ncomp; j++)
2997 ctx->block->outputs[(i * 4) + j] = create_immed(ctx, 0.0);
2998 }
2999 }
3000
3001 /* from TGSI perspective, we actually have inputs. But most of the "inputs"
3002 * for a fragment shader are just bary.f instructions. The *actual* inputs
3003 * from the hw perspective are the frag_pos and optionally frag_coord and
3004 * frag_face.
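 *
 * Layout set up below: frag_face (if used) ultimately lands in hr0.x,
 * frag_coord (if used) takes r0.x..r0.w, and frag_pos takes the next
 * two free scalar regs (r0.x/r0.y when neither of the others is used).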
3005 */
3006 static void
3007 fixup_frag_inputs(struct ir3_compile_context *ctx)
3008 {
3009 struct ir3_shader_variant *so = ctx->so;
3010 struct ir3_block *block = ctx->block;
3011 struct ir3_instruction **inputs;
3012 struct ir3_instruction *instr;
3013 int n, regid = 0;
3014
3015 block->ninputs = 0;
3016
3017 n = 4; /* always have frag_pos */
3018 n += COND(so->frag_face, 4);
3019 n += COND(so->frag_coord, 4);
3020
3021 inputs = ir3_alloc(ctx->ir, n * (sizeof(struct ir3_instruction *)));
3022
3023 if (so->frag_face) {
3024 /* this ultimately gets assigned to hr0.x so doesn't conflict
3025 * with frag_coord/frag_pos..
3026 */
3027 inputs[block->ninputs++] = ctx->frag_face;
3028 ctx->frag_face->regs[0]->num = 0;
3029
3030 /* remaining channels not used, but let's avoid confusing
3031 * other parts that expect inputs to come in groups of vec4
3032 */
3033 inputs[block->ninputs++] = NULL;
3034 inputs[block->ninputs++] = NULL;
3035 inputs[block->ninputs++] = NULL;
3036 }
3037
3038 /* since we don't know where to set the regid for frag_coord,
3039 * we have to use r0.x for it. But we don't want to *always*
3040 * use r1.x for frag_pos as that could increase the register
3041 * footprint on simple shaders:
3042 */
3043 if (so->frag_coord) {
3044 ctx->frag_coord[0]->regs[0]->num = regid++;
3045 ctx->frag_coord[1]->regs[0]->num = regid++;
3046 ctx->frag_coord[2]->regs[0]->num = regid++;
3047 ctx->frag_coord[3]->regs[0]->num = regid++;
3048
3049 inputs[block->ninputs++] = ctx->frag_coord[0];
3050 inputs[block->ninputs++] = ctx->frag_coord[1];
3051 inputs[block->ninputs++] = ctx->frag_coord[2];
3052 inputs[block->ninputs++] = ctx->frag_coord[3];
3053 }
3054
3055 /* we always have frag_pos: */
3056 so->pos_regid = regid;
3057
3058 /* r0.x */
3059 instr = create_input(block, NULL, block->ninputs);
3060 instr->regs[0]->num = regid++;
3061 inputs[block->ninputs++] = instr;
3062 ctx->frag_pos->regs[1]->instr = instr;
3063
3064 /* r0.y */
3065 instr = create_input(block, NULL, block->ninputs);
3066 instr->regs[0]->num = regid++;
3067 inputs[block->ninputs++] = instr;
3068 ctx->frag_pos->regs[2]->instr = instr;
3069
3070 block->inputs = inputs;
3071 }
3072
3073 static void
3074 compile_instructions(struct ir3_compile_context *ctx)
3075 {
3076 push_block(ctx);
3077
3078 /* for fragment shader, we have a single input register (usually
3079 * r0.xy) which is used as the base for bary.f varying fetch instrs:
3080 */
3081 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
3082 struct ir3_instruction *instr;
3083 instr = ir3_instr_create(ctx->block, -1, OPC_META_FI);
3084 ir3_reg_create(instr, 0, 0);
3085 ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.x */
3086 ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.y */
3087 ctx->frag_pos = instr;
3088 }
3089
3090 while (!tgsi_parse_end_of_tokens(&ctx->parser)) {
3091 tgsi_parse_token(&ctx->parser);
3092
3093 switch (ctx->parser.FullToken.Token.Type) {
3094 case TGSI_TOKEN_TYPE_DECLARATION: {
3095 struct tgsi_full_declaration *decl =
3096 &ctx->parser.FullToken.FullDeclaration;
3097 unsigned file = decl->Declaration.File;
3098 if (file == TGSI_FILE_OUTPUT) {
3099 decl_out(ctx, decl);
3100 } else if (file == TGSI_FILE_INPUT) {
3101 decl_in(ctx, decl);
3102 }
3103
3104 if ((file != TGSI_FILE_CONSTANT) && decl->Declaration.Array) {
3105 int aid = decl->Array.ArrayID + ctx->array_offsets[file];
3106
3107 compile_assert(ctx, aid < ARRAY_SIZE(ctx->array));
3108
3109 /* legacy ArrayID==0 stuff probably isn't going to work
3110 * well (and is at least untested).. let's just scream:
3111 */
3112 compile_assert(ctx, aid != 0);
3113
3114 ctx->array[aid].first = decl->Range.First;
3115 ctx->array[aid].last = decl->Range.Last;
3116 }
3117 break;
3118 }
3119 case TGSI_TOKEN_TYPE_IMMEDIATE: {
3120 /* TODO: if we know the immediate is small enough, and only
3121 * used with instructions that can embed an immediate, we
3122 * can skip this:
3123 */
3124 struct tgsi_full_immediate *imm =
3125 &ctx->parser.FullToken.FullImmediate;
3126 unsigned n = ctx->so->immediates_count++;
3127 compile_assert(ctx, n < ARRAY_SIZE(ctx->so->immediates));
3128 memcpy(ctx->so->immediates[n].val, imm->u, 16);
3129 break;
3130 }
3131 case TGSI_TOKEN_TYPE_INSTRUCTION: {
3132 struct tgsi_full_instruction *inst =
3133 &ctx->parser.FullToken.FullInstruction;
3134 unsigned opc = inst->Instruction.Opcode;
3135 const struct instr_translater *t = &translaters[opc];
3136
3137 if (t->fxn) {
3138 t->fxn(t, ctx, inst);
3139 ctx->num_internal_temps = 0;
3140
3141 compile_assert(ctx, !ctx->using_tmp_dst);
3142 } else {
3143 compile_error(ctx, "unknown TGSI opc: %s\n",
3144 tgsi_get_opcode_name(opc));
3145 }
3146
3147 switch (inst->Instruction.Saturate) {
3148 case TGSI_SAT_ZERO_ONE:
3149 create_clamp_imm(ctx, &inst->Dst[0].Register,
3150 fui(0.0), fui(1.0));
3151 break;
3152 case TGSI_SAT_MINUS_PLUS_ONE:
3153 create_clamp_imm(ctx, &inst->Dst[0].Register,
3154 fui(-1.0), fui(1.0));
3155 break;
3156 }
3157
3158 instr_finish(ctx);
3159
3160 break;
3161 }
3162 default:
3163 break;
3164 }
3165 }
3166 }
3167
3168 static void
3169 compile_dump(struct ir3_compile_context *ctx)
3170 {
3171 const char *name = (ctx->so->type == SHADER_VERTEX) ? "vert" : "frag";
3172 static unsigned n = 0;
3173 char fname[16];
3174 FILE *f;
3175 snprintf(fname, sizeof(fname), "%s-%04u.dot", name, n++);
3176 f = fopen(fname, "w");
3177 if (!f)
3178 return;
3179 ir3_block_depth(ctx->block);
3180 ir3_dump(ctx->ir, name, ctx->block, f);
3181 fclose(f);
3182 }
3183
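/* Compile TGSI tokens into so->ir. Roughly, the pass pipeline below is:
 * flatten (resolve shallow if/else after compile), copy propagation
 * (cp) to drop the front-end's extra mov's, grouping of left/right
 * neighbors, depth calculation, scheduling, register allocation, and
 * legalization, with dumps between stages under the FD_DBG_OPT* flags.
 */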
3184 int
3185 ir3_compile_shader(struct ir3_shader_variant *so,
3186 const struct tgsi_token *tokens, struct ir3_shader_key key,
3187 bool cp)
3188 {
3189 struct ir3_compile_context ctx;
3190 struct ir3_block *block;
3191 struct ir3_instruction **inputs;
3192 unsigned i, j, actual_in;
3193 int ret = 0, max_bary;
3194
3195 assert(!so->ir);
3196
3197 so->ir = ir3_create();
3198
3199 assert(so->ir);
3200
3201 if (compile_init(&ctx, so, tokens) != TGSI_PARSE_OK) {
3202 DBG("INIT failed!");
3203 ret = -1;
3204 goto out;
3205 }
3206
3207 compile_instructions(&ctx);
3208
3209 block = ctx.block;
3210 so->ir->block = block;
3211
3212 /* keep track of the inputs from TGSI perspective.. */
3213 inputs = block->inputs;
3214
3215 /* but fixup actual inputs for frag shader: */
3216 if (ctx.type == TGSI_PROCESSOR_FRAGMENT)
3217 fixup_frag_inputs(&ctx);
3218
3219 /* at this point, for binning pass, throw away unneeded outputs: */
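	/* (the binning pass only needs to place each prim into screen
	 * tiles, so just the first position/psize output is kept and the
	 * color/varying outputs are dropped)
	 */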
3220 if (key.binning_pass) {
3221 for (i = 0, j = 0; i < so->outputs_count; i++) {
3222 unsigned name = sem2name(so->outputs[i].semantic);
3223 			unsigned idx = sem2idx(so->outputs[i].semantic);
3224
3225 /* throw away everything but first position/psize */
3226 if ((idx == 0) && ((name == TGSI_SEMANTIC_POSITION) ||
3227 (name == TGSI_SEMANTIC_PSIZE))) {
3228 if (i != j) {
3229 so->outputs[j] = so->outputs[i];
3230 block->outputs[(j*4)+0] = block->outputs[(i*4)+0];
3231 block->outputs[(j*4)+1] = block->outputs[(i*4)+1];
3232 block->outputs[(j*4)+2] = block->outputs[(i*4)+2];
3233 block->outputs[(j*4)+3] = block->outputs[(i*4)+3];
3234 }
3235 j++;
3236 }
3237 }
3238 so->outputs_count = j;
3239 block->noutputs = j * 4;
3240 }
3241
3242 /* for rendering to alpha format, we only need the .w component,
3243 * and we need it to be in the .x position:
3244 */
3245 if (key.alpha) {
3246 for (i = 0, j = 0; i < so->outputs_count; i++) {
3247 unsigned name = sem2name(so->outputs[i].semantic);
3248
3249 /* move .w component to .x and discard others: */
3250 if (name == TGSI_SEMANTIC_COLOR) {
3251 block->outputs[(i*4)+0] = block->outputs[(i*4)+3];
3252 block->outputs[(i*4)+1] = NULL;
3253 block->outputs[(i*4)+2] = NULL;
3254 block->outputs[(i*4)+3] = NULL;
3255 }
3256 }
3257 }
3258
3259 /* if we want half-precision outputs, mark the output registers
3260 * as half:
3261 */
3262 if (key.half_precision) {
3263 for (i = 0; i < block->noutputs; i++) {
3264 if (!block->outputs[i])
3265 continue;
3266 block->outputs[i]->regs[0]->flags |= IR3_REG_HALF;
3267 }
3268 }
3269
3270 /* at this point, we want the kill's in the outputs array too,
3271 * so that they get scheduled (since they have no dst).. we've
3272 * already ensured that the array is big enough in push_block():
3273 */
3274 if (ctx.type == TGSI_PROCESSOR_FRAGMENT) {
3275 for (i = 0; i < ctx.kill_count; i++)
3276 block->outputs[block->noutputs++] = ctx.kill[i];
3277 }
3278
3279 if (fd_mesa_debug & FD_DBG_OPTDUMP)
3280 compile_dump(&ctx);
3281
3282 ret = ir3_block_flatten(block);
3283 if (ret < 0) {
3284 DBG("FLATTEN failed!");
3285 goto out;
3286 }
3287 if ((ret > 0) && (fd_mesa_debug & FD_DBG_OPTDUMP))
3288 compile_dump(&ctx);
3289
3290 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3291 printf("BEFORE CP:\n");
3292 ir3_dump_instr_list(block->head);
3293 }
3294
3295 ir3_block_depth(block);
3296
3297 /* First remove all the extra mov's (which we could skip if the
3298 * front-end was clever enough not to insert them in the first
3299 * place). Then figure out left/right neighbors, re-inserting
3300 * extra mov's when needed to avoid conflicts.
3301 */
3302 if (cp && !(fd_mesa_debug & FD_DBG_NOCP))
3303 ir3_block_cp(block);
3304
3305 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3306 printf("BEFORE GROUPING:\n");
3307 ir3_dump_instr_list(block->head);
3308 }
3309
3310 /* Group left/right neighbors, inserting mov's where needed to
3311 * solve conflicts:
3312 */
3313 ir3_block_group(block);
3314
3315 if (fd_mesa_debug & FD_DBG_OPTDUMP)
3316 compile_dump(&ctx);
3317
3318 ir3_block_depth(block);
3319
3320 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3321 printf("AFTER DEPTH:\n");
3322 ir3_dump_instr_list(block->head);
3323 }
3324
3325 ret = ir3_block_sched(block);
3326 if (ret) {
3327 DBG("SCHED failed!");
3328 goto out;
3329 }
3330
3331 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3332 printf("AFTER SCHED:\n");
3333 ir3_dump_instr_list(block->head);
3334 }
3335
3336 ret = ir3_block_ra(block, so->type, so->frag_coord, so->frag_face);
3337 if (ret) {
3338 DBG("RA failed!");
3339 goto out;
3340 }
3341
3342 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3343 printf("AFTER RA:\n");
3344 ir3_dump_instr_list(block->head);
3345 }
3346
3347 ir3_block_legalize(block, &so->has_samp, &max_bary);
3348
3349 /* fixup input/outputs: */
3350 for (i = 0; i < so->outputs_count; i++) {
3351 so->outputs[i].regid = block->outputs[i*4]->regs[0]->num;
3352 /* preserve hack for depth output.. tgsi writes depth to .z,
3353 * but what we give the hw is the scalar register:
3354 */
3355 if ((ctx.type == TGSI_PROCESSOR_FRAGMENT) &&
3356 (sem2name(so->outputs[i].semantic) == TGSI_SEMANTIC_POSITION))
3357 so->outputs[i].regid += 2;
3358 }
3359 /* Note that some or all channels of an input may be unused: */
3360 actual_in = 0;
3361 for (i = 0; i < so->inputs_count; i++) {
3362 unsigned j, regid = ~0, compmask = 0;
3363 so->inputs[i].ncomp = 0;
3364 for (j = 0; j < 4; j++) {
3365 struct ir3_instruction *in = inputs[(i*4) + j];
3366 if (in) {
3367 compmask |= (1 << j);
3368 regid = in->regs[0]->num - j;
3369 actual_in++;
3370 so->inputs[i].ncomp++;
3371 }
3372 }
3373 so->inputs[i].regid = regid;
3374 so->inputs[i].compmask = compmask;
3375 }
3376
3377 /* fragment shader always gets full vec4's even if it doesn't
3378 	 * fetch all components, but for the vertex shader we need to update
3379 	 * with the actual number of components fetched, otherwise things
3380 	 * will hang due to a mismatch between the VFD_DECODE's and
3381 * TOTALATTRTOVS
3382 */
3383 if (so->type == SHADER_VERTEX)
3384 so->total_in = actual_in;
3385 else
3386 so->total_in = align(max_bary + 1, 4);
3387
3388 out:
3389 if (ret) {
3390 ir3_destroy(so->ir);
3391 so->ir = NULL;
3392 }
3393 compile_free(&ctx);
3394
3395 return ret;
3396 }