freedreno/a3xx: more texture array fixes
[mesa.git] / src / gallium / drivers / freedreno / ir3 / ir3_compiler.c
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2
3 /*
4 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 * Authors:
26 * Rob Clark <robclark@freedesktop.org>
27 */
28
29 #include <stdarg.h>
30
31 #include "pipe/p_state.h"
32 #include "util/u_string.h"
33 #include "util/u_memory.h"
34 #include "util/u_inlines.h"
35 #include "tgsi/tgsi_parse.h"
36 #include "tgsi/tgsi_ureg.h"
37 #include "tgsi/tgsi_info.h"
38 #include "tgsi/tgsi_strings.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_scan.h"
41
42 #include "freedreno_lowering.h"
43 #include "freedreno_util.h"
44
45 #include "ir3_compiler.h"
46 #include "ir3_shader.h"
47
48 #include "instr-a3xx.h"
49 #include "ir3.h"
50
51 struct ir3_compile_context {
52 const struct tgsi_token *tokens;
53 bool free_tokens;
54 struct ir3 *ir;
55 struct ir3_shader_variant *so;
56
57 struct ir3_block *block;
58 struct ir3_instruction *current_instr;
59
60 /* we need to defer updates to block->outputs[] until the end
61 * of an instruction (so we don't see the new value until *after*
62 * the src registers are processed)
63 */
64 struct {
65 struct ir3_instruction *instr, **instrp;
66 } output_updates[16];
67 unsigned num_output_updates;
68
69 /* are we in a sequence of "atomic" instructions?
70 */
71 bool atomic;
72
73 /* For fragment shaders, from the hw perspective the only
74 * actual input is the r0.xy position register passed to bary.f.
75 * But TGSI doesn't know that; it still declares things as
76 * IN[] registers. So we do all the input tracking normally
77 * and fix things up after compile_instructions()
78 *
79 * NOTE that frag_pos is the hardware position (possibly it
80 * is actually an index or tag or some such.. it is *not*
81 * values that can be directly used for gl_FragCoord..)
82 */
83 struct ir3_instruction *frag_pos, *frag_face, *frag_coord[4];
84
85 struct tgsi_parse_context parser;
86 unsigned type;
87
88 struct tgsi_shader_info info;
89
90 /* for calculating input/output positions/linkages: */
91 unsigned next_inloc;
92
93 unsigned num_internal_temps;
94 struct tgsi_src_register internal_temps[6];
95
96 /* idx/slot for last compiler generated immediate */
97 unsigned immediate_idx;
98
99 /* stack of branch instructions that mark (potentially nested)
100 * if/else/loop/etc
101 */
102 struct {
103 struct ir3_instruction *instr, *cond;
104 bool inv; /* true iff in else leg of branch */
105 } branch[16];
106 unsigned int branch_count;
107
108 /* list of kill instructions: */
109 struct ir3_instruction *kill[16];
110 unsigned int kill_count;
111
112 /* used when dst is same as one of the src, to avoid overwriting a
113 * src element before the remaining scalar instructions that make
114 * up the vector operation
115 */
116 struct tgsi_dst_register tmp_dst;
117 struct tgsi_src_register *tmp_src;
118
119 /* just for catching incorrect use of get_dst()/put_dst():
120 */
121 bool using_tmp_dst;
122 };
123
124
125 static void vectorize(struct ir3_compile_context *ctx,
126 struct ir3_instruction *instr, struct tgsi_dst_register *dst,
127 int nsrcs, ...);
128 static void create_mov(struct ir3_compile_context *ctx,
129 struct tgsi_dst_register *dst, struct tgsi_src_register *src);
130 static type_t get_ftype(struct ir3_compile_context *ctx);
131
132 static unsigned
133 compile_init(struct ir3_compile_context *ctx, struct ir3_shader_variant *so,
134 const struct tgsi_token *tokens)
135 {
136 unsigned ret;
137 struct tgsi_shader_info *info = &ctx->info;
138 const struct fd_lowering_config lconfig = {
139 .color_two_side = so->key.color_two_side,
140 .lower_DST = true,
141 .lower_XPD = true,
142 .lower_SCS = true,
143 .lower_LRP = true,
144 .lower_FRC = true,
145 .lower_POW = true,
146 .lower_LIT = true,
147 .lower_EXP = true,
148 .lower_LOG = true,
149 .lower_DP4 = true,
150 .lower_DP3 = true,
151 .lower_DPH = true,
152 .lower_DP2 = true,
153 .lower_DP2A = true,
154 };
155
156 ctx->tokens = fd_transform_lowering(&lconfig, tokens, &ctx->info);
157 ctx->free_tokens = !!ctx->tokens;
158 if (!ctx->tokens) {
159 /* no lowering */
160 ctx->tokens = tokens;
161 }
162 ctx->ir = so->ir;
163 ctx->so = so;
164 ctx->next_inloc = 8;
165 ctx->num_internal_temps = 0;
166 ctx->branch_count = 0;
167 ctx->kill_count = 0;
168 ctx->block = NULL;
169 ctx->current_instr = NULL;
170 ctx->num_output_updates = 0;
171 ctx->atomic = false;
172 ctx->frag_pos = NULL;
173 ctx->frag_face = NULL;
174 ctx->tmp_src = NULL;
175 ctx->using_tmp_dst = false;
176
177 memset(ctx->frag_coord, 0, sizeof(ctx->frag_coord));
178
179 #define FM(x) (1 << TGSI_FILE_##x)
180 /* the optimizer can't deal with relative addressing: */
181 if (info->indirect_files & (FM(TEMPORARY) | FM(INPUT) | FM(OUTPUT)))
182 return TGSI_PARSE_ERROR;
183
184 /* NOTE: if relative addressing is used, we set constlen in
185 * the compiler (to worst-case value) since we don't know in
186 * the assembler what the max addr reg value can be:
187 */
188 if (info->indirect_files & FM(CONSTANT))
189 so->constlen = 4 * (ctx->info.file_max[TGSI_FILE_CONSTANT] + 1);
190
191 /* Immediates go after constants: */
192 so->first_immediate = info->file_max[TGSI_FILE_CONSTANT] + 1;
193 ctx->immediate_idx = 4 * (ctx->info.file_max[TGSI_FILE_IMMEDIATE] + 1);
194
195 ret = tgsi_parse_init(&ctx->parser, ctx->tokens);
196 if (ret != TGSI_PARSE_OK)
197 return ret;
198
199 ctx->type = ctx->parser.FullHeader.Processor.Processor;
200
201 return ret;
202 }
203
204 static void
205 compile_error(struct ir3_compile_context *ctx, const char *format, ...)
206 {
207 va_list ap;
208 va_start(ap, format);
209 _debug_vprintf(format, ap);
210 va_end(ap);
211 tgsi_dump(ctx->tokens, 0);
212 debug_assert(0);
213 }
214
215 #define compile_assert(ctx, cond) do { \
216 if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
217 } while (0)
218
219 static void
220 compile_free(struct ir3_compile_context *ctx)
221 {
222 if (ctx->free_tokens)
223 free((void *)ctx->tokens);
224 tgsi_parse_free(&ctx->parser);
225 }
226
227 struct instr_translater {
228 void (*fxn)(const struct instr_translater *t,
229 struct ir3_compile_context *ctx,
230 struct tgsi_full_instruction *inst);
231 unsigned tgsi_opc;
232 opc_t opc;
233 opc_t hopc; /* opc to use for half_precision mode, if different */
234 unsigned arg;
235 };
236
237 static void
238 instr_finish(struct ir3_compile_context *ctx)
239 {
240 unsigned i;
241
242 if (ctx->atomic)
243 return;
244
245 for (i = 0; i < ctx->num_output_updates; i++)
246 *(ctx->output_updates[i].instrp) = ctx->output_updates[i].instr;
247
248 ctx->num_output_updates = 0;
249 }
250
251 /* For "atomic" groups of instructions, for example the four scalar
252 * instructions to perform a vec4 operation. Basically this just
253 * blocks out handling of output_updates so the next scalar instruction
254 * still sees the result from before the start of the atomic group.
255 *
256 * NOTE: when used properly, this could probably replace get/put_dst()
257 * stuff.
258 */
259 static void
260 instr_atomic_start(struct ir3_compile_context *ctx)
261 {
262 ctx->atomic = true;
263 }
264
265 static void
266 instr_atomic_end(struct ir3_compile_context *ctx)
267 {
268 ctx->atomic = false;
269 instr_finish(ctx);
270 }
271
272 static struct ir3_instruction *
273 instr_create(struct ir3_compile_context *ctx, int category, opc_t opc)
274 {
275 instr_finish(ctx);
276 return (ctx->current_instr = ir3_instr_create(ctx->block, category, opc));
277 }
278
279 static struct ir3_instruction *
280 instr_clone(struct ir3_compile_context *ctx, struct ir3_instruction *instr)
281 {
282 instr_finish(ctx);
283 return (ctx->current_instr = ir3_instr_clone(instr));
284 }
285
286 static struct ir3_block *
287 push_block(struct ir3_compile_context *ctx)
288 {
289 struct ir3_block *block;
290 unsigned ntmp, nin, nout;
291
292 #define SCALAR_REGS(file) (4 * (ctx->info.file_max[TGSI_FILE_ ## file] + 1))
293
294 /* hmm, give ourselves room to create 4 extra temporaries (vec4):
295 */
296 ntmp = SCALAR_REGS(TEMPORARY);
297 ntmp += 4 * 4;
298
299 nout = SCALAR_REGS(OUTPUT);
300 nin = SCALAR_REGS(INPUT);
301
302 /* for outermost block, 'inputs' are the actual shader INPUT
303 * register file. Reads from INPUT registers always go back to
304 * top block. For nested blocks, 'inputs' is used to track any
305 * TEMPORARY file register from one of the enclosing blocks that
306 * is read in this block.
307 */
308 if (!ctx->block) {
309 /* NOTE: fragment shaders actually have two inputs (r0.xy, the
310 * position)
311 */
312 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
313 int n = 2;
314 if (ctx->info.reads_position)
315 n += 4;
316 if (ctx->info.uses_frontface)
317 n += 4;
318 nin = MAX2(n, nin);
319 nout += ARRAY_SIZE(ctx->kill);
320 }
321 } else {
322 nin = ntmp;
323 }
324
325 block = ir3_block_create(ctx->ir, ntmp, nin, nout);
326
327 if ((ctx->type == TGSI_PROCESSOR_FRAGMENT) && !ctx->block)
328 block->noutputs -= ARRAY_SIZE(ctx->kill);
329
330 block->parent = ctx->block;
331 ctx->block = block;
332
333 return block;
334 }
335
336 static void
337 pop_block(struct ir3_compile_context *ctx)
338 {
339 ctx->block = ctx->block->parent;
340 compile_assert(ctx, ctx->block);
341 }
342
343 static struct ir3_instruction *
344 create_output(struct ir3_block *block, struct ir3_instruction *instr,
345 unsigned n)
346 {
347 struct ir3_instruction *out;
348
349 out = ir3_instr_create(block, -1, OPC_META_OUTPUT);
350 out->inout.block = block;
351 ir3_reg_create(out, n, 0);
352 if (instr)
353 ir3_reg_create(out, 0, IR3_REG_SSA)->instr = instr;
354
355 return out;
356 }
357
358 static struct ir3_instruction *
359 create_input(struct ir3_block *block, struct ir3_instruction *instr,
360 unsigned n)
361 {
362 struct ir3_instruction *in;
363
364 in = ir3_instr_create(block, -1, OPC_META_INPUT);
365 in->inout.block = block;
366 ir3_reg_create(in, n, 0);
367 if (instr)
368 ir3_reg_create(in, 0, IR3_REG_SSA)->instr = instr;
369
370 return in;
371 }
372
373 static struct ir3_instruction *
374 block_input(struct ir3_block *block, unsigned n)
375 {
376 /* references to INPUT register file always go back up to
377 * top level:
378 */
379 if (block->parent)
380 return block_input(block->parent, n);
381 return block->inputs[n];
382 }
383
384 /* return the temporary in scope, creating meta-input nodes as
385 * needed to track block inputs
386 */
387 static struct ir3_instruction *
388 block_temporary(struct ir3_block *block, unsigned n)
389 {
390 /* references to TEMPORARY register file, find the nearest
391 * enclosing block which has already assigned this temporary,
392 * creating meta-input instructions along the way to keep
393 * track of block inputs
394 */
395 if (block->parent && !block->temporaries[n]) {
396 /* if already have input for this block, reuse: */
397 if (!block->inputs[n])
398 block->inputs[n] = block_temporary(block->parent, n);
399
400 /* and create new input to return: */
401 return create_input(block, block->inputs[n], n);
402 }
403 return block->temporaries[n];
404 }
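
/* A concrete sketch of the lookup (illustrative, not from the original
 * source): say TEMP[0].x is written in the top-level block and then read
 * inside a nested if-block. block_temporary() on the nested block finds
 * temporaries[n] unset, recurses into the parent (which has the writing
 * instruction), caches that value in block->inputs[n], and returns a
 * fresh OPC_META_INPUT whose src points at the parent's value. Deeper
 * nesting just repeats this, one meta-input per block level.
 */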
405
406 static struct ir3_instruction *
407 create_immed(struct ir3_compile_context *ctx, float val)
408 {
409 /* NOTE: *don't* use instr_create() here!
410 */
411 struct ir3_instruction *instr;
412 instr = ir3_instr_create(ctx->block, 1, 0);
413 instr->cat1.src_type = get_ftype(ctx);
414 instr->cat1.dst_type = get_ftype(ctx);
415 ir3_reg_create(instr, 0, 0);
416 ir3_reg_create(instr, 0, IR3_REG_IMMED)->fim_val = val;
417 return instr;
418 }
419
420 static void
421 ssa_dst(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
422 const struct tgsi_dst_register *dst, unsigned chan)
423 {
424 unsigned n = regid(dst->Index, chan);
425 unsigned idx = ctx->num_output_updates;
426
427 compile_assert(ctx, idx < ARRAY_SIZE(ctx->output_updates));
428
429 /* NOTE: defer update of temporaries[idx] or output[idx]
430 * until instr_finish(), so that if the current instruction
431 * reads the same TEMP/OUT[] it gets the old value:
432 *
433 * bleh.. this might be a bit easier to just figure out
434 * in instr_finish(). But at that point we've already
435 * lost information about OUTPUT vs TEMPORARY register
436 * file..
437 */
438
439 switch (dst->File) {
440 case TGSI_FILE_OUTPUT:
441 compile_assert(ctx, n < ctx->block->noutputs);
442 ctx->output_updates[idx].instrp = &ctx->block->outputs[n];
443 ctx->output_updates[idx].instr = instr;
444 ctx->num_output_updates++;
445 break;
446 case TGSI_FILE_TEMPORARY:
447 compile_assert(ctx, n < ctx->block->ntemporaries);
448 ctx->output_updates[idx].instrp = &ctx->block->temporaries[n];
449 ctx->output_updates[idx].instr = instr;
450 ctx->num_output_updates++;
451 break;
452 case TGSI_FILE_ADDRESS:
453 compile_assert(ctx, n < 1);
454 ctx->output_updates[idx].instrp = &ctx->block->address;
455 ctx->output_updates[idx].instr = instr;
456 ctx->num_output_updates++;
457 break;
458 }
459 }
460
461 static void
462 ssa_src(struct ir3_compile_context *ctx, struct ir3_register *reg,
463 const struct tgsi_src_register *src, unsigned chan)
464 {
465 struct ir3_block *block = ctx->block;
466 unsigned n = regid(src->Index, chan);
467
468 switch (src->File) {
469 case TGSI_FILE_INPUT:
470 reg->flags |= IR3_REG_SSA;
471 reg->instr = block_input(ctx->block, n);
472 break;
473 case TGSI_FILE_OUTPUT:
474 /* really this should only happen in the case of 'MOV_SAT OUT[n], ..',
475 * i.e. for the clamp instructions that follow:
476 */
477 reg->flags |= IR3_REG_SSA;
478 reg->instr = block->outputs[n];
479 /* we don't have to worry about read from an OUTPUT that was
480 * assigned outside of the current block, because the _SAT
481 * clamp instructions will always be in the same block as
482 * the original instruction which wrote the OUTPUT
483 */
484 compile_assert(ctx, reg->instr);
485 break;
486 case TGSI_FILE_TEMPORARY:
487 reg->flags |= IR3_REG_SSA;
488 reg->instr = block_temporary(ctx->block, n);
489 break;
490 }
491
492 if ((reg->flags & IR3_REG_SSA) && !reg->instr) {
493 /* this can happen when registers (or components of a TGSI
494 * register) are used as src before they have been assigned
495 * (undefined contents). To avoid confusing the rest of the
496 * compiler, and to generally keep things peachy, substitute
497 * an instruction that sets the src to 0.0. Or to keep
498 * things undefined, I could plug in a random number? :-P
499 *
500 * NOTE: *don't* use instr_create() here!
501 */
502 reg->instr = create_immed(ctx, 0.0);
503 }
504 }
505
506 static struct ir3_register *
507 add_dst_reg_wrmask(struct ir3_compile_context *ctx,
508 struct ir3_instruction *instr, const struct tgsi_dst_register *dst,
509 unsigned chan, unsigned wrmask)
510 {
511 unsigned flags = 0, num = 0;
512 struct ir3_register *reg;
513
514 switch (dst->File) {
515 case TGSI_FILE_OUTPUT:
516 case TGSI_FILE_TEMPORARY:
517 /* uses SSA */
518 break;
519 case TGSI_FILE_ADDRESS:
520 flags |= IR3_REG_ADDR;
521 /* uses SSA */
522 break;
523 default:
524 compile_error(ctx, "unsupported dst register file: %s\n",
525 tgsi_file_name(dst->File));
526 break;
527 }
528
529 if (dst->Indirect)
530 flags |= IR3_REG_RELATIV;
531
532 reg = ir3_reg_create(instr, regid(num, chan), flags);
533
534 /* NOTE: do not call ssa_dst() if atomic.. vectorize()
535 * itself will call ssa_dst(). This is to filter out
536 * the (initially bogus) .x component dst which is
537 * created (but not necessarily used, ie. if the net
538 * result of the vector operation does not write to
539 * the .x component)
540 */
541
542 reg->wrmask = wrmask;
543 if (wrmask == 0x1) {
544 /* normal case */
545 if (!ctx->atomic)
546 ssa_dst(ctx, instr, dst, chan);
547 } else if ((dst->File == TGSI_FILE_TEMPORARY) ||
548 (dst->File == TGSI_FILE_OUTPUT) ||
549 (dst->File == TGSI_FILE_ADDRESS)) {
550 unsigned i;
551
552 /* if the instruction writes multiple components, we need to create
553 * some place-holder instructions to collect the registers:
554 */
555 for (i = 0; i < 4; i++) {
556 if (wrmask & (1 << i)) {
557 struct ir3_instruction *collect =
558 ir3_instr_create(ctx->block, -1, OPC_META_FO);
559 collect->fo.off = i;
560 /* unused dst reg: */
561 ir3_reg_create(collect, 0, 0);
562 /* and src reg used to hold original instr */
563 ir3_reg_create(collect, 0, IR3_REG_SSA)->instr = instr;
564 if (!ctx->atomic)
565 ssa_dst(ctx, collect, dst, chan+i);
566 }
567 }
568 }
569
570 return reg;
571 }
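
/* E.g. (an illustrative case, assuming a sam instruction writing
 * TEMP[0].xyz): add_dst_reg_wrmask() with wrmask=0x7 creates three
 * OPC_META_FO "fan-out" nodes with fo.off = 0, 1, 2, and ssa_dst()
 * registers each one as the new SSA value of TEMP[0].x/y/z, so later
 * reads of individual components link back through the fan-outs to the
 * single multi-component instruction.
 */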
572
573 static struct ir3_register *
574 add_dst_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
575 const struct tgsi_dst_register *dst, unsigned chan)
576 {
577 return add_dst_reg_wrmask(ctx, instr, dst, chan, 0x1);
578 }
579
580 static struct ir3_register *
581 add_src_reg_wrmask(struct ir3_compile_context *ctx,
582 struct ir3_instruction *instr, const struct tgsi_src_register *src,
583 unsigned chan, unsigned wrmask)
584 {
585 unsigned flags = 0, num = 0;
586 struct ir3_register *reg;
587 struct ir3_instruction *orig = NULL;
588
589 /* TODO we need to use a mov to temp for const >= 64.. or maybe
590 * we could use relative addressing..
591 */
592 compile_assert(ctx, src->Index < 64);
593
594 switch (src->File) {
595 case TGSI_FILE_IMMEDIATE:
596 /* TODO if possible, use actual immediate instead of const.. but
597 * TGSI has vec4 immediates, we can only embed scalar (of limited
598 * size, depending on instruction..)
599 */
600 flags |= IR3_REG_CONST;
601 num = src->Index + ctx->so->first_immediate;
602 break;
603 case TGSI_FILE_CONSTANT:
604 flags |= IR3_REG_CONST;
605 num = src->Index;
606 break;
607 case TGSI_FILE_OUTPUT:
608 /* NOTE: we should only end up w/ OUTPUT file for things like
609 * clamp()'ing saturated dst instructions
610 */
611 case TGSI_FILE_INPUT:
612 case TGSI_FILE_TEMPORARY:
613 /* uses SSA */
614 break;
615 default:
616 compile_error(ctx, "unsupported src register file: %s\n",
617 tgsi_file_name(src->File));
618 break;
619 }
620
621 if (src->Absolute)
622 flags |= IR3_REG_ABS;
623 if (src->Negate)
624 flags |= IR3_REG_NEGATE;
625
626 if (src->Indirect) {
627 flags |= IR3_REG_RELATIV;
628
629 /* shouldn't happen, and we can't cope with it below: */
630 compile_assert(ctx, wrmask == 0x1);
631
632 /* wrap in a meta-deref to track both the src and address: */
633 orig = instr;
634
635 instr = ir3_instr_create(ctx->block, -1, OPC_META_DEREF);
636 ir3_reg_create(instr, 0, 0);
637 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->block->address;
638 }
639
640 reg = ir3_reg_create(instr, regid(num, chan), flags);
641
642 reg->wrmask = wrmask;
643 if (wrmask == 0x1) {
644 /* normal case */
645 ssa_src(ctx, reg, src, chan);
646 } else if ((src->File == TGSI_FILE_TEMPORARY) ||
647 (src->File == TGSI_FILE_OUTPUT) ||
648 (src->File == TGSI_FILE_INPUT)) {
649 struct ir3_instruction *collect;
650 unsigned i;
651
652 compile_assert(ctx, !src->Indirect);
653
654 /* if the instruction reads multiple components, we need to create
655 * some place-holder instructions to collect the registers:
656 */
657 collect = ir3_instr_create(ctx->block, -1, OPC_META_FI);
658 ir3_reg_create(collect, 0, 0); /* unused dst reg */
659
660 for (i = 0; i < 4; i++) {
661 if (wrmask & (1 << i)) {
662 /* and src reg used to point to the original instr */
663 ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
664 src, chan + i);
665 } else if (wrmask & ~((1 << i) - 1)) {
666 /* if any remaining components, then dummy
667 * placeholder src reg to fill in the blanks:
668 */
669 ir3_reg_create(collect, 0, 0);
670 }
671 }
672
673 reg->flags |= IR3_REG_SSA;
674 reg->instr = collect;
675 }
676
677 if (src->Indirect) {
678 reg = ir3_reg_create(orig, 0, flags | IR3_REG_SSA);
679 reg->instr = instr;
680 }
681 return reg;
682 }
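
/* And the symmetric read case (again just a sketch): a sam instruction
 * consuming TEMP[0].xyz gets a single OPC_META_FI "fan-in" src whose own
 * srcs are the SSA values of TEMP[0].x/y/z, so one ir3 register with
 * wrmask=0x7 stands in for the three scalar reads.
 */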
683
684 static struct ir3_register *
685 add_src_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
686 const struct tgsi_src_register *src, unsigned chan)
687 {
688 return add_src_reg_wrmask(ctx, instr, src, chan, 0x1);
689 }
690
691 static void
692 src_from_dst(struct tgsi_src_register *src, struct tgsi_dst_register *dst)
693 {
694 src->File = dst->File;
695 src->Indirect = dst->Indirect;
696 src->Dimension = dst->Dimension;
697 src->Index = dst->Index;
698 src->Absolute = 0;
699 src->Negate = 0;
700 src->SwizzleX = TGSI_SWIZZLE_X;
701 src->SwizzleY = TGSI_SWIZZLE_Y;
702 src->SwizzleZ = TGSI_SWIZZLE_Z;
703 src->SwizzleW = TGSI_SWIZZLE_W;
704 }
705
706 /* Get internal-temp src/dst to use for a sequence of instructions
707 * generated by a single TGSI op.
708 */
709 static struct tgsi_src_register *
710 get_internal_temp(struct ir3_compile_context *ctx,
711 struct tgsi_dst_register *tmp_dst)
712 {
713 struct tgsi_src_register *tmp_src;
714 int n;
715
716 tmp_dst->File = TGSI_FILE_TEMPORARY;
717 tmp_dst->WriteMask = TGSI_WRITEMASK_XYZW;
718 tmp_dst->Indirect = 0;
719 tmp_dst->Dimension = 0;
720
721 /* assign next temporary: */
722 n = ctx->num_internal_temps++;
723 compile_assert(ctx, n < ARRAY_SIZE(ctx->internal_temps));
724 tmp_src = &ctx->internal_temps[n];
725
726 tmp_dst->Index = ctx->info.file_max[TGSI_FILE_TEMPORARY] + n + 1;
727
728 src_from_dst(tmp_src, tmp_dst);
729
730 return tmp_src;
731 }
732
733 static inline bool
734 is_const(struct tgsi_src_register *src)
735 {
736 return (src->File == TGSI_FILE_CONSTANT) ||
737 (src->File == TGSI_FILE_IMMEDIATE);
738 }
739
740 static inline bool
741 is_relative(struct tgsi_src_register *src)
742 {
743 return src->Indirect;
744 }
745
746 static inline bool
747 is_rel_or_const(struct tgsi_src_register *src)
748 {
749 return is_relative(src) || is_const(src);
750 }
751
752 static type_t
753 get_ftype(struct ir3_compile_context *ctx)
754 {
755 return TYPE_F32;
756 }
757
758 static type_t
759 get_utype(struct ir3_compile_context *ctx)
760 {
761 return TYPE_U32;
762 }
763
764 static unsigned
765 src_swiz(struct tgsi_src_register *src, int chan)
766 {
767 switch (chan) {
768 case 0: return src->SwizzleX;
769 case 1: return src->SwizzleY;
770 case 2: return src->SwizzleZ;
771 case 3: return src->SwizzleW;
772 }
773 assert(0);
774 return 0;
775 }
776
777 /* for instructions that cannot take a const register as src, if needed
778 * generate a move to temporary gpr:
779 */
780 static struct tgsi_src_register *
781 get_unconst(struct ir3_compile_context *ctx, struct tgsi_src_register *src)
782 {
783 struct tgsi_dst_register tmp_dst;
784 struct tgsi_src_register *tmp_src;
785
786 compile_assert(ctx, is_rel_or_const(src));
787
788 tmp_src = get_internal_temp(ctx, &tmp_dst);
789
790 create_mov(ctx, &tmp_dst, src);
791
792 return tmp_src;
793 }
794
795 static void
796 get_immediate(struct ir3_compile_context *ctx,
797 struct tgsi_src_register *reg, uint32_t val)
798 {
799 unsigned neg, swiz, idx, i;
800 /* actually maps 1:1 currently.. not sure if that is safe to rely on: */
801 static const unsigned swiz2tgsi[] = {
802 TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
803 };
804
805 for (i = 0; i < ctx->immediate_idx; i++) {
806 swiz = i % 4;
807 idx = i / 4;
808
809 if (ctx->so->immediates[idx].val[swiz] == val) {
810 neg = 0;
811 break;
812 }
813
814 if (ctx->so->immediates[idx].val[swiz] == -val) {
815 neg = 1;
816 break;
817 }
818 }
819
820 if (i == ctx->immediate_idx) {
821 /* need to generate a new immediate: */
822 swiz = i % 4;
823 idx = i / 4;
824 neg = 0;
825 ctx->so->immediates[idx].val[swiz] = val;
826 ctx->so->immediates_count = idx + 1;
827 ctx->immediate_idx++;
828 }
829
830 reg->File = TGSI_FILE_IMMEDIATE;
831 reg->Indirect = 0;
832 reg->Dimension = 0;
833 reg->Index = idx;
834 reg->Absolute = 0;
835 reg->Negate = neg;
836 reg->SwizzleX = swiz2tgsi[swiz];
837 reg->SwizzleY = swiz2tgsi[swiz];
838 reg->SwizzleZ = swiz2tgsi[swiz];
839 reg->SwizzleW = swiz2tgsi[swiz];
840 }
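
/* A worked sketch of the packing (not from the original source): the
 * first three distinct values requested land in immediates[0].val[0..2],
 * i.e. const vec4 slot first_immediate + 0 with swizzles .x/.y/.z;
 * requesting one of them again (or its negation, via reg->Negate)
 * returns the existing slot instead of burning a new one.
 */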
841
842 static void
843 create_mov(struct ir3_compile_context *ctx, struct tgsi_dst_register *dst,
844 struct tgsi_src_register *src)
845 {
846 type_t type_mov = get_ftype(ctx);
847 unsigned i;
848
849 for (i = 0; i < 4; i++) {
850 /* move to destination: */
851 if (dst->WriteMask & (1 << i)) {
852 struct ir3_instruction *instr;
853
854 if (src->Absolute || src->Negate) {
855 /* can't have abs or neg on a mov instr, so use
856 * absneg.f instead to handle these cases:
857 */
858 instr = instr_create(ctx, 2, OPC_ABSNEG_F);
859 } else {
860 instr = instr_create(ctx, 1, 0);
861 instr->cat1.src_type = type_mov;
862 instr->cat1.dst_type = type_mov;
863 }
864
865 add_dst_reg(ctx, instr, dst, i);
866 add_src_reg(ctx, instr, src, src_swiz(src, i));
867 }
868 }
869 }
870
871 static void
872 create_clamp(struct ir3_compile_context *ctx,
873 struct tgsi_dst_register *dst, struct tgsi_src_register *val,
874 struct tgsi_src_register *minval, struct tgsi_src_register *maxval)
875 {
876 struct ir3_instruction *instr;
877
878 instr = instr_create(ctx, 2, OPC_MAX_F);
879 vectorize(ctx, instr, dst, 2, val, 0, minval, 0);
880
881 instr = instr_create(ctx, 2, OPC_MIN_F);
882 vectorize(ctx, instr, dst, 2, val, 0, maxval, 0);
883 }
884
885 static void
886 create_clamp_imm(struct ir3_compile_context *ctx,
887 struct tgsi_dst_register *dst,
888 uint32_t minval, uint32_t maxval)
889 {
890 struct tgsi_src_register minconst, maxconst;
891 struct tgsi_src_register src;
892
893 src_from_dst(&src, dst);
894
895 get_immediate(ctx, &minconst, minval);
896 get_immediate(ctx, &maxconst, maxval);
897
898 create_clamp(ctx, dst, &src, &minconst, &maxconst);
899 }
900
901 static struct tgsi_dst_register *
902 get_dst(struct ir3_compile_context *ctx, struct tgsi_full_instruction *inst)
903 {
904 struct tgsi_dst_register *dst = &inst->Dst[0].Register;
905 unsigned i;
906
907 compile_assert(ctx, !ctx->using_tmp_dst);
908 ctx->using_tmp_dst = true;
909
910 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
911 struct tgsi_src_register *src = &inst->Src[i].Register;
912 if ((src->File == dst->File) && (src->Index == dst->Index)) {
913 if ((dst->WriteMask == TGSI_WRITEMASK_XYZW) &&
914 (src->SwizzleX == TGSI_SWIZZLE_X) &&
915 (src->SwizzleY == TGSI_SWIZZLE_Y) &&
916 (src->SwizzleZ == TGSI_SWIZZLE_Z) &&
917 (src->SwizzleW == TGSI_SWIZZLE_W))
918 continue;
919 ctx->tmp_src = get_internal_temp(ctx, &ctx->tmp_dst);
920 ctx->tmp_dst.WriteMask = dst->WriteMask;
921 dst = &ctx->tmp_dst;
922 break;
923 }
924 }
925 return dst;
926 }
927
928 static void
929 put_dst(struct ir3_compile_context *ctx, struct tgsi_full_instruction *inst,
930 struct tgsi_dst_register *dst)
931 {
932 compile_assert(ctx, ctx->using_tmp_dst);
933 ctx->using_tmp_dst = false;
934
935 /* if necessary, add mov back into original dst: */
936 if (dst != &inst->Dst[0].Register) {
937 create_mov(ctx, &inst->Dst[0].Register, ctx->tmp_src);
938 }
939 }
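
/* Typical usage in a handler (sketch): for something like
 * "ADD TEMP[0].x, TEMP[0].y, IMM[0]" the dst aliases a src, so:
 *
 *   dst = get_dst(ctx, inst);   // redirected to an internal temp
 *   ... emit instruction(s) writing dst ...
 *   put_dst(ctx, inst, dst);    // movs the temp back to TEMP[0].x
 *
 * whereas with no src/dst overlap get_dst() just returns
 * &inst->Dst[0].Register and put_dst() adds no extra mov.
 */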
940
941 /* helper to generate the necessary repeat and/or additional instructions
942 * to turn a scalar instruction into a vector operation:
943 */
944 static void
945 vectorize(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
946 struct tgsi_dst_register *dst, int nsrcs, ...)
947 {
948 va_list ap;
949 int i, j, n = 0;
950
951 instr_atomic_start(ctx);
952
953 add_dst_reg(ctx, instr, dst, TGSI_SWIZZLE_X);
954
955 va_start(ap, nsrcs);
956 for (j = 0; j < nsrcs; j++) {
957 struct tgsi_src_register *src =
958 va_arg(ap, struct tgsi_src_register *);
959 unsigned flags = va_arg(ap, unsigned);
960 struct ir3_register *reg;
961 if (flags & IR3_REG_IMMED) {
962 reg = ir3_reg_create(instr, 0, IR3_REG_IMMED);
963 /* this is an ugly cast.. should have put flags first! */
964 reg->iim_val = *(int *)&src;
965 } else {
966 reg = add_src_reg(ctx, instr, src, TGSI_SWIZZLE_X);
967 }
968 reg->flags |= flags & ~IR3_REG_NEGATE;
969 if (flags & IR3_REG_NEGATE)
970 reg->flags ^= IR3_REG_NEGATE;
971 }
972 va_end(ap);
973
974 for (i = 0; i < 4; i++) {
975 if (dst->WriteMask & (1 << i)) {
976 struct ir3_instruction *cur;
977
978 if (n++ == 0) {
979 cur = instr;
980 } else {
981 cur = instr_clone(ctx, instr);
982 }
983
984 ssa_dst(ctx, cur, dst, i);
985
986 /* fix-up dst register component: */
987 cur->regs[0]->num = regid(cur->regs[0]->num >> 2, i);
988
989 /* fix-up src register component: */
990 va_start(ap, nsrcs);
991 for (j = 0; j < nsrcs; j++) {
992 struct ir3_register *reg = cur->regs[j+1];
993 struct tgsi_src_register *src =
994 va_arg(ap, struct tgsi_src_register *);
995 unsigned flags = va_arg(ap, unsigned);
996 if (reg->flags & IR3_REG_SSA) {
997 ssa_src(ctx, reg, src, src_swiz(src, i));
998 } else if (!(flags & IR3_REG_IMMED)) {
999 reg->num = regid(reg->num >> 2, src_swiz(src, i));
1000 }
1001 }
1002 va_end(ap);
1003 }
1004 }
1005
1006 instr_atomic_end(ctx);
1007 }
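
/* An example of the expansion (a hedged sketch): for a handler calling
 * vectorize(ctx, instr, dst, 2, a, 0, b, 0) on a max.f with
 * dst->WriteMask = TGSI_WRITEMASK_XY, the instruction built by the
 * caller becomes the .x instance and one instr_clone() yields the .y
 * instance, each with dst/src components fixed up per the swizzles; the
 * atomic start/end wrapper keeps the .y clone from seeing the .x result
 * as an output update mid-expansion.
 */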
1008
1009 /*
1010 * Handlers for TGSI instructions which do not have a 1:1 mapping to
1011 * native instructions:
1012 */
1013
1014 static void
1015 trans_clamp(const struct instr_translater *t,
1016 struct ir3_compile_context *ctx,
1017 struct tgsi_full_instruction *inst)
1018 {
1019 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1020 struct tgsi_src_register *src0 = &inst->Src[0].Register;
1021 struct tgsi_src_register *src1 = &inst->Src[1].Register;
1022 struct tgsi_src_register *src2 = &inst->Src[2].Register;
1023
1024 create_clamp(ctx, dst, src0, src1, src2);
1025
1026 put_dst(ctx, inst, dst);
1027 }
1028
1029 /* ARL(x) = x, but mova from hrN.x to a0.. */
1030 static void
1031 trans_arl(const struct instr_translater *t,
1032 struct ir3_compile_context *ctx,
1033 struct tgsi_full_instruction *inst)
1034 {
1035 struct ir3_instruction *instr;
1036 struct tgsi_dst_register tmp_dst;
1037 struct tgsi_src_register *tmp_src;
1038 struct tgsi_dst_register *dst = &inst->Dst[0].Register;
1039 struct tgsi_src_register *src = &inst->Src[0].Register;
1040 unsigned chan = src->SwizzleX;
1041
1042 compile_assert(ctx, dst->File == TGSI_FILE_ADDRESS);
1043
1044 /* NOTE: we allocate a temporary from a flat register
1045 * namespace (ignoring half vs full). It turns out
1046 * not to really matter since registers get reassigned
1047 * later in ir3_ra which (hopefully!) can deal a bit
1048 * better with mixed half and full precision.
1049 */
1050 tmp_src = get_internal_temp(ctx, &tmp_dst);
1051
1052 /* cov.{f32,u32}s16 Rtmp, Rsrc */
1053 instr = instr_create(ctx, 1, 0);
1054 instr->cat1.src_type = (t->tgsi_opc == TGSI_OPCODE_ARL) ?
1055 get_ftype(ctx) : get_utype(ctx);
1056 instr->cat1.dst_type = TYPE_S16;
1057 add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
1058 add_src_reg(ctx, instr, src, chan);
1059
1060 /* shl.b Rtmp, Rtmp, 2 */
1061 instr = instr_create(ctx, 2, OPC_SHL_B);
1062 add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
1063 add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;
1064 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 2;
1065
1066 /* mova a0, Rtmp */
1067 instr = instr_create(ctx, 1, 0);
1068 instr->cat1.src_type = TYPE_S16;
1069 instr->cat1.dst_type = TYPE_S16;
1070 add_dst_reg(ctx, instr, dst, 0)->flags |= IR3_REG_HALF;
1071 add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;
1072 }
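
/* The shl.b by 2 above is what turns a vec4-granular TGSI index into a
 * scalar register index: presumably ADDR[0].x selecting CONST[a] must
 * end up as a0.x = a * 4, since each TGSI vec4 register occupies four
 * consecutive scalar components.
 */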
1073
1074 /*
1075 * texture fetch/sample instructions:
1076 */
1077
1078 struct tex_info {
1079 int8_t order[4];
1080 unsigned src_wrmask, flags;
1081 };
1082
1083 static const struct tex_info *
1084 get_tex_info(struct ir3_compile_context *ctx,
1085 struct tgsi_full_instruction *inst)
1086 {
1087 static const struct tex_info tex1d = {
1088 .order = { 0, -1, -1, -1 }, /* coord.x */
1089 .src_wrmask = TGSI_WRITEMASK_XY,
1090 .flags = 0,
1091 };
1092 static const struct tex_info tex1ds = {
1093 .order = { 0, -1, 2, -1 }, /* coord.xz */
1094 .src_wrmask = TGSI_WRITEMASK_XYZ,
1095 .flags = IR3_INSTR_S,
1096 };
1097 static const struct tex_info tex1da = {
1098 .order = { 0, -1, 1, -1 }, /* coord.xy */
1099 .src_wrmask = TGSI_WRITEMASK_XYZ,
1100 .flags = IR3_INSTR_A,
1101 };
1102 static const struct tex_info tex1dsa = {
1103 .order = { 0, -1, 1, 2 }, /* coord.xyz */
1104 .src_wrmask = TGSI_WRITEMASK_XYZW,
1105 .flags = IR3_INSTR_S | IR3_INSTR_A,
1106 };
1107 static const struct tex_info tex2d = {
1108 .order = { 0, 1, -1, -1 }, /* coord.xy */
1109 .src_wrmask = TGSI_WRITEMASK_XY,
1110 .flags = 0,
1111 };
1112 static const struct tex_info tex2ds = {
1113 .order = { 0, 1, 2, -1 }, /* coord.xyz */
1114 .src_wrmask = TGSI_WRITEMASK_XYZ,
1115 .flags = IR3_INSTR_S,
1116 };
1117 static const struct tex_info tex2da = {
1118 .order = { 0, 1, 2, -1 }, /* coord.xyz */
1119 .src_wrmask = TGSI_WRITEMASK_XYZ,
1120 .flags = IR3_INSTR_A,
1121 };
1122 static const struct tex_info tex2dsa = {
1123 .order = { 0, 1, 2, 3 }, /* coord.xyzw */
1124 .src_wrmask = TGSI_WRITEMASK_XYZW,
1125 .flags = IR3_INSTR_S | IR3_INSTR_A,
1126 };
1127 static const struct tex_info tex3d = {
1128 .order = { 0, 1, 2, -1 }, /* coord.xyz */
1129 .src_wrmask = TGSI_WRITEMASK_XYZ,
1130 .flags = IR3_INSTR_3D,
1131 };
1132 static const struct tex_info tex3ds = {
1133 .order = { 0, 1, 2, 3 }, /* coord.xyzw */
1134 .src_wrmask = TGSI_WRITEMASK_XYZW,
1135 .flags = IR3_INSTR_S | IR3_INSTR_3D,
1136 };
1137 static const struct tex_info txp1d = {
1138 .order = { 0, -1, 3, -1 }, /* coord.xw */
1139 .src_wrmask = TGSI_WRITEMASK_XYZ,
1140 .flags = IR3_INSTR_P,
1141 };
1142 static const struct tex_info txp1ds = {
1143 .order = { 0, -1, 2, 3 }, /* coord.xyz */
1144 .src_wrmask = TGSI_WRITEMASK_XYZW,
1145 .flags = IR3_INSTR_P | IR3_INSTR_S,
1146 };
1147 static const struct tex_info txp2d = {
1148 .order = { 0, 1, 3, -1 }, /* coord.xyw */
1149 .src_wrmask = TGSI_WRITEMASK_XYZ,
1150 .flags = IR3_INSTR_P,
1151 };
1152 static const struct tex_info txp2ds = {
1153 .order = { 0, 1, 2, 3 }, /* coord.xyzw */
1154 .src_wrmask = TGSI_WRITEMASK_XYZW,
1155 .flags = IR3_INSTR_P | IR3_INSTR_S,
1156 };
1157 static const struct tex_info txp3d = {
1158 .order = { 0, 1, 2, 3 }, /* coord.xyzw */
1159 .src_wrmask = TGSI_WRITEMASK_XYZW,
1160 .flags = IR3_INSTR_P | IR3_INSTR_3D,
1161 };
1162
1163 unsigned tex = inst->Texture.Texture;
1164
1165 switch (inst->Instruction.Opcode) {
1166 case TGSI_OPCODE_TEX:
1167 case TGSI_OPCODE_TXB:
1168 case TGSI_OPCODE_TXL:
1169 switch (tex) {
1170 case TGSI_TEXTURE_1D:
1171 return &tex1d;
1172 case TGSI_TEXTURE_SHADOW1D:
1173 return &tex1ds;
1174 case TGSI_TEXTURE_1D_ARRAY:
1175 return &tex1da;
1176 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1177 return &tex1dsa;
1178 case TGSI_TEXTURE_2D:
1179 case TGSI_TEXTURE_RECT:
1180 return &tex2d;
1181 case TGSI_TEXTURE_SHADOW2D:
1182 case TGSI_TEXTURE_SHADOWRECT:
1183 return &tex2ds;
1184 case TGSI_TEXTURE_2D_ARRAY:
1185 return &tex2da;
1186 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1187 return &tex2dsa;
1188 case TGSI_TEXTURE_3D:
1189 case TGSI_TEXTURE_CUBE:
1190 return &tex3d;
1191 case TGSI_TEXTURE_SHADOWCUBE:
1192 return &tex3ds;
1193 default:
1194 compile_error(ctx, "unknown texture type: %s\n",
1195 tgsi_texture_names[tex]);
1196 return NULL;
1197 }
1198 break;
1199 case TGSI_OPCODE_TXP:
1200 switch (tex) {
1201 case TGSI_TEXTURE_1D:
1202 return &txp1d;
1203 case TGSI_TEXTURE_SHADOW1D:
1204 return &txp1ds;
1205 case TGSI_TEXTURE_2D:
1206 case TGSI_TEXTURE_RECT:
1207 return &txp2d;
1208 case TGSI_TEXTURE_SHADOW2D:
1209 case TGSI_TEXTURE_SHADOWRECT:
1210 return &txp2ds;
1211 case TGSI_TEXTURE_3D:
1212 case TGSI_TEXTURE_CUBE:
1213 return &txp3d;
1214 default:
1215 compile_error(ctx, "unknown texture type: %s\n",
1216 tgsi_texture_names[tex]);
1217 break;
1218 }
1219 break;
1220 }
1221 compile_assert(ctx, 0);
1222 return NULL;
1223 }
1224
1225 static bool check_swiz(struct tgsi_src_register *src, const int8_t order[4])
1226 {
1227 unsigned i;
1228 for (i = 1; (i < 4) && order[i] >= 0; i++)
1229 if (src_swiz(src, i) != (src_swiz(src, 0) + order[i]))
1230 return false;
1231 return true;
1232 }
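
/* E.g. with txp2d's order {0, 1, 3, -1}, a coord swizzled .xyw passes
 * check_swiz() (each used component sits at src_swiz(src, 0) + order[i])
 * while .yxw fails and forces the mov-to-temp shuffle below.
 */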
1233
1234 static bool is_1d(unsigned tex)
1235 {
1236 switch (tex) {
1237 case TGSI_TEXTURE_1D:
1238 case TGSI_TEXTURE_SHADOW1D:
1239 case TGSI_TEXTURE_1D_ARRAY:
1240 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1241 return true;
1242 default:
1243 return false;
1244 }
1245 }
1246
1247 static struct tgsi_src_register *
1248 get_tex_coord(struct ir3_compile_context *ctx,
1249 struct tgsi_full_instruction *inst,
1250 const struct tex_info *tinf)
1251 {
1252 struct tgsi_src_register *coord = &inst->Src[0].Register;
1253 struct ir3_instruction *instr;
1254 unsigned tex = inst->Texture.Texture;
1255 bool needs_mov = false;
1256
1257 /* cat5 instructions don't seem to handle const or relative: */
1258 if (is_rel_or_const(coord))
1259 needs_mov = true;
1260
1261 /* 1D textures we fix up w/ 0.5 as 2nd coord: */
1262 if (is_1d(tex))
1263 needs_mov = true;
1264
1265 /* The texture sample instructions need the coord in successive
1266 * registers/components (ie. src.xy but not src.yx). And TXP
1267 * needs the .w component in .z for 2D.. so in some cases we
1268 * might need to emit some mov instructions to shuffle things
1269 * around:
1270 */
1271 if (!needs_mov)
1272 needs_mov = !check_swiz(coord, tinf->order);
1273
1274 if (needs_mov) {
1275 struct tgsi_dst_register tmp_dst;
1276 struct tgsi_src_register *tmp_src;
1277 unsigned j;
1278
1279 type_t type_mov = get_ftype(ctx);
1280
1281 /* need to move things around: */
1282 tmp_src = get_internal_temp(ctx, &tmp_dst);
1283
1284 for (j = 0; j < 4; j++) {
1285 if (tinf->order[j] < 0)
1286 continue;
1287 instr = instr_create(ctx, 1, 0); /* mov */
1288 instr->cat1.src_type = type_mov;
1289 instr->cat1.dst_type = type_mov;
1290 add_dst_reg(ctx, instr, &tmp_dst, j);
1291 add_src_reg(ctx, instr, coord,
1292 src_swiz(coord, tinf->order[j]));
1293 }
1294
1295 /* fix up .y coord: */
1296 if (is_1d(tex)) {
1297 instr = instr_create(ctx, 1, 0); /* mov */
1298 instr->cat1.src_type = type_mov;
1299 instr->cat1.dst_type = type_mov;
1300 add_dst_reg(ctx, instr, &tmp_dst, 1); /* .y */
1301 ir3_reg_create(instr, 0, IR3_REG_IMMED)->fim_val = 0.5;
1302 }
1303
1304 coord = tmp_src;
1305 }
1306
1307 return coord;
1308 }
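
/* E.g. for "TXP ..., 2D" with a plain coord like IN[0] (a made-up
 * example): order is {0, 1, 3, -1}, and since .w does not follow .y in
 * consecutive components the loop above movs IN[0].x -> tmp.x,
 * IN[0].y -> tmp.y, IN[0].w -> tmp.z, and the sam instruction then
 * consumes tmp.xyz.
 */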
1309
1310 static void
1311 trans_samp(const struct instr_translater *t,
1312 struct ir3_compile_context *ctx,
1313 struct tgsi_full_instruction *inst)
1314 {
1315 struct ir3_instruction *instr;
1316 struct tgsi_dst_register *dst = &inst->Dst[0].Register;
1317 struct tgsi_src_register *coord;
1318 struct tgsi_src_register *samp = &inst->Src[1].Register;
1319 const struct tex_info *tinf;
1320
1321 tinf = get_tex_info(ctx, inst);
1322 coord = get_tex_coord(ctx, inst, tinf);
1323
1324 instr = instr_create(ctx, 5, t->opc);
1325 instr->cat5.type = get_ftype(ctx);
1326 instr->cat5.samp = samp->Index;
1327 instr->cat5.tex = samp->Index;
1328 instr->flags |= tinf->flags;
1329
1330 add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask);
1331 add_src_reg_wrmask(ctx, instr, coord, coord->SwizzleX, tinf->src_wrmask);
1332
1333 if (t->opc != OPC_SAM)
1334 add_src_reg_wrmask(ctx, instr, coord, coord->SwizzleW, 0x1);
1335 }
1336
1337 /* DDX/DDY */
1338 static void
1339 trans_deriv(const struct instr_translater *t,
1340 struct ir3_compile_context *ctx,
1341 struct tgsi_full_instruction *inst)
1342 {
1343 struct ir3_instruction *instr;
1344 struct tgsi_dst_register *dst = &inst->Dst[0].Register;
1345 struct tgsi_src_register *src = &inst->Src[0].Register;
1346 static const int8_t order[4] = {0, 1, 2, 3};
1347
1348 if (!check_swiz(src, order)) {
1349 struct tgsi_dst_register tmp_dst;
1350 struct tgsi_src_register *tmp_src;
1351
1352 tmp_src = get_internal_temp(ctx, &tmp_dst);
1353 create_mov(ctx, &tmp_dst, src);
1354
1355 src = tmp_src;
1356 }
1357
1358 /* This might be a workaround for a hw bug? The blob compiler always
1359 * seems to work two components at a time for dsy/dsx. It does
1360 * actually seem to work in some cases (or at least some piglit
1361 * tests) for four components at a time, but it seems more reliable
1362 * to split this into two instructions like the blob compiler
1363 * does:
1364 */
1365
1366 instr = instr_create(ctx, 5, t->opc);
1367 instr->cat5.type = get_ftype(ctx);
1368 add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask & 0x3);
1369 add_src_reg_wrmask(ctx, instr, src, 0, dst->WriteMask & 0x3);
1370
1371 instr = instr_create(ctx, 5, t->opc);
1372 instr->cat5.type = get_ftype(ctx);
1373 add_dst_reg_wrmask(ctx, instr, dst, 2, (dst->WriteMask >> 2) & 0x3);
1374 add_src_reg_wrmask(ctx, instr, src, 2, (dst->WriteMask >> 2) & 0x3);
1375 }
1376
1377 /*
1378 * SEQ(a,b) = (a == b) ? 1.0 : 0.0
1379 * cmps.f.eq tmp0, a, b
1380 * cov.u16f16 dst, tmp0
1381 *
1382 * SNE(a,b) = (a != b) ? 1.0 : 0.0
1383 * cmps.f.ne tmp0, a, b
1384 * cov.u16f16 dst, tmp0
1385 *
1386 * SGE(a,b) = (a >= b) ? 1.0 : 0.0
1387 * cmps.f.ge tmp0, a, b
1388 * cov.u16f16 dst, tmp0
1389 *
1390 * SLE(a,b) = (a <= b) ? 1.0 : 0.0
1391 * cmps.f.le tmp0, a, b
1392 * cov.u16f16 dst, tmp0
1393 *
1394 * SGT(a,b) = (a > b) ? 1.0 : 0.0
1395 * cmps.f.gt tmp0, a, b
1396 * cov.u16f16 dst, tmp0
1397 *
1398 * SLT(a,b) = (a < b) ? 1.0 : 0.0
1399 * cmps.f.lt tmp0, a, b
1400 * cov.u16f16 dst, tmp0
1401 *
1402 * CMP(a,b,c) = (a < 0.0) ? b : c
1403 * cmps.f.lt tmp0, a, {0.0}
1404 * sel.b16 dst, b, tmp0, c
1405 */
1406 static void
1407 trans_cmp(const struct instr_translater *t,
1408 struct ir3_compile_context *ctx,
1409 struct tgsi_full_instruction *inst)
1410 {
1411 struct ir3_instruction *instr;
1412 struct tgsi_dst_register tmp_dst;
1413 struct tgsi_src_register *tmp_src;
1414 struct tgsi_src_register constval0;
1415 /* final instruction for CMP() uses orig src1 and src2: */
1416 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1417 struct tgsi_src_register *a0, *a1, *a2;
1418 unsigned condition;
1419
1420 tmp_src = get_internal_temp(ctx, &tmp_dst);
1421
1422 a0 = &inst->Src[0].Register; /* a */
1423 a1 = &inst->Src[1].Register; /* b */
1424
1425 switch (t->tgsi_opc) {
1426 case TGSI_OPCODE_SEQ:
1427 case TGSI_OPCODE_FSEQ:
1428 condition = IR3_COND_EQ;
1429 break;
1430 case TGSI_OPCODE_SNE:
1431 case TGSI_OPCODE_FSNE:
1432 condition = IR3_COND_NE;
1433 break;
1434 case TGSI_OPCODE_SGE:
1435 case TGSI_OPCODE_FSGE:
1436 condition = IR3_COND_GE;
1437 break;
1438 case TGSI_OPCODE_SLT:
1439 case TGSI_OPCODE_FSLT:
1440 condition = IR3_COND_LT;
1441 break;
1442 case TGSI_OPCODE_SLE:
1443 condition = IR3_COND_LE;
1444 break;
1445 case TGSI_OPCODE_SGT:
1446 condition = IR3_COND_GT;
1447 break;
1448 case TGSI_OPCODE_CMP:
1449 get_immediate(ctx, &constval0, fui(0.0));
1450 a0 = &inst->Src[0].Register; /* a */
1451 a1 = &constval0; /* {0.0} */
1452 condition = IR3_COND_LT;
1453 break;
1454 default:
1455 compile_assert(ctx, 0);
1456 return;
1457 }
1458
1459 if (is_const(a0) && is_const(a1))
1460 a0 = get_unconst(ctx, a0);
1461
1462 /* cmps.f.<cond> tmp, a0, a1 */
1463 instr = instr_create(ctx, 2, OPC_CMPS_F);
1464 instr->cat2.condition = condition;
1465 vectorize(ctx, instr, &tmp_dst, 2, a0, 0, a1, 0);
1466
1467 switch (t->tgsi_opc) {
1468 case TGSI_OPCODE_SEQ:
1469 case TGSI_OPCODE_SGE:
1470 case TGSI_OPCODE_SLE:
1471 case TGSI_OPCODE_SNE:
1472 case TGSI_OPCODE_SGT:
1473 case TGSI_OPCODE_SLT:
1474 /* cov.u16f16 dst, tmp0 */
1475 instr = instr_create(ctx, 1, 0);
1476 instr->cat1.src_type = get_utype(ctx);
1477 instr->cat1.dst_type = get_ftype(ctx);
1478 vectorize(ctx, instr, dst, 1, tmp_src, 0);
1479 break;
1480 case TGSI_OPCODE_FSEQ:
1481 case TGSI_OPCODE_FSGE:
1482 case TGSI_OPCODE_FSNE:
1483 case TGSI_OPCODE_FSLT:
1484 /* absneg.s dst, (neg)tmp0 */
1485 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
1486 vectorize(ctx, instr, dst, 1, tmp_src, IR3_REG_NEGATE);
1487 break;
1488 case TGSI_OPCODE_CMP:
1489 a1 = &inst->Src[1].Register;
1490 a2 = &inst->Src[2].Register;
1491 /* sel.{b32,b16} dst, src1, tmp, src2 */
1492 instr = instr_create(ctx, 3, OPC_SEL_B32);
1493 vectorize(ctx, instr, dst, 3, a1, 0, tmp_src, 0, a2, 0);
1494
1495 break;
1496 }
1497
1498 put_dst(ctx, inst, dst);
1499 }
1500
1501 /*
1502 * USNE(a,b) = (a != b) ? ~0 : 0
1503 * cmps.u32.ne dst, a, b
1504 *
1505 * USEQ(a,b) = (a == b) ? ~0 : 0
1506 * cmps.u32.eq dst, a, b
1507 *
1508 * ISGE(a,b) = (a >= b) ? ~0 : 0
1509 * cmps.s32.ge dst, a, b
1510 *
1511 * USGE(a,b) = (a >= b) ? ~0 : 0
1512 * cmps.u32.ge dst, a, b
1513 *
1514 * ISLT(a,b) = (a < b) ? ~0 : 0
1515 * cmps.s32.lt dst, a, b
1516 *
1517 * USLT(a,b) = (a < b) ? ~0 : 0
1518 * cmps.u32.lt dst, a, b
1519 *
1520 */
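/* (The absneg.s in the expansions above is what widens the comparison
 * result: cmps.* writes integer 0 or 1, and negating that gives 0 or -1,
 * i.e. the 0 / ~0 the TGSI integer set-ops are defined to return.. at
 * least that is the apparent intent, same trick as the FSEQ/FSLT cases
 * in trans_cmp().)
 */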
1521 static void
1522 trans_icmp(const struct instr_translater *t,
1523 struct ir3_compile_context *ctx,
1524 struct tgsi_full_instruction *inst)
1525 {
1526 struct ir3_instruction *instr;
1527 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1528 struct tgsi_dst_register tmp_dst;
1529 struct tgsi_src_register *tmp_src;
1530 struct tgsi_src_register *a0, *a1;
1531 unsigned condition;
1532
1533 a0 = &inst->Src[0].Register; /* a */
1534 a1 = &inst->Src[1].Register; /* b */
1535
1536 switch (t->tgsi_opc) {
1537 case TGSI_OPCODE_USNE:
1538 condition = IR3_COND_NE;
1539 break;
1540 case TGSI_OPCODE_USEQ:
1541 condition = IR3_COND_EQ;
1542 break;
1543 case TGSI_OPCODE_ISGE:
1544 case TGSI_OPCODE_USGE:
1545 condition = IR3_COND_GE;
1546 break;
1547 case TGSI_OPCODE_ISLT:
1548 case TGSI_OPCODE_USLT:
1549 condition = IR3_COND_LT;
1550 break;
1551
1552 default:
1553 compile_assert(ctx, 0);
1554 return;
1555 }
1556
1557 if (is_const(a0) && is_const(a1))
1558 a0 = get_unconst(ctx, a0);
1559
1560 tmp_src = get_internal_temp(ctx, &tmp_dst);
1561 /* cmps.{u32,s32}.<cond> tmp, a0, a1 */
1562 instr = instr_create(ctx, 2, t->opc);
1563 instr->cat2.condition = condition;
1564 vectorize(ctx, instr, &tmp_dst, 2, a0, 0, a1, 0);
1565
1566 /* absneg.s dst, (neg)tmp */
1567 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
1568 vectorize(ctx, instr, dst, 1, tmp_src, IR3_REG_NEGATE);
1569
1570 put_dst(ctx, inst, dst);
1571 }
1572
1573 /*
1574 * UCMP(a,b,c) = a ? b : c
1575 * sel.b16 dst, b, a, c
1576 */
1577 static void
1578 trans_ucmp(const struct instr_translater *t,
1579 struct ir3_compile_context *ctx,
1580 struct tgsi_full_instruction *inst)
1581 {
1582 struct ir3_instruction *instr;
1583 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1584 struct tgsi_src_register *a0, *a1, *a2;
1585
1586 a0 = &inst->Src[0].Register; /* a */
1587 a1 = &inst->Src[1].Register; /* b */
1588 a2 = &inst->Src[2].Register; /* c */
1589
1590 if (is_rel_or_const(a0))
1591 a0 = get_unconst(ctx, a0);
1592
1593 /* sel.{b32,b16} dst, b, a, c */
1594 instr = instr_create(ctx, 3, OPC_SEL_B32);
1595 vectorize(ctx, instr, dst, 3, a1, 0, a0, 0, a2, 0);
1596 put_dst(ctx, inst, dst);
1597 }
1598
1599
1600 /*
1601 * Conditional / Flow control
1602 */
1603
1604 static void
1605 push_branch(struct ir3_compile_context *ctx, bool inv,
1606 struct ir3_instruction *instr, struct ir3_instruction *cond)
1607 {
1608 unsigned int idx = ctx->branch_count++;
1609 compile_assert(ctx, idx < ARRAY_SIZE(ctx->branch));
1610 ctx->branch[idx].instr = instr;
1611 ctx->branch[idx].inv = inv;
1612 /* else side of branch has same condition: */
1613 if (!inv)
1614 ctx->branch[idx].cond = cond;
1615 }
1616
1617 static struct ir3_instruction *
1618 pop_branch(struct ir3_compile_context *ctx)
1619 {
1620 unsigned int idx = --ctx->branch_count;
1621 return ctx->branch[idx].instr;
1622 }
1623
1624 static void
1625 trans_if(const struct instr_translater *t,
1626 struct ir3_compile_context *ctx,
1627 struct tgsi_full_instruction *inst)
1628 {
1629 struct ir3_instruction *instr, *cond;
1630 struct tgsi_src_register *src = &inst->Src[0].Register;
1631 struct tgsi_dst_register tmp_dst;
1632 struct tgsi_src_register *tmp_src;
1633 struct tgsi_src_register constval;
1634
1635 get_immediate(ctx, &constval, fui(0.0));
1636 tmp_src = get_internal_temp(ctx, &tmp_dst);
1637
1638 if (is_const(src))
1639 src = get_unconst(ctx, src);
1640
1641 /* cmps.{f,u}.ne tmp0, b, {0.0} */
1642 instr = instr_create(ctx, 2, t->opc);
1643 add_dst_reg(ctx, instr, &tmp_dst, 0);
1644 add_src_reg(ctx, instr, src, src->SwizzleX);
1645 add_src_reg(ctx, instr, &constval, constval.SwizzleX);
1646 instr->cat2.condition = IR3_COND_NE;
1647
1648 compile_assert(ctx, instr->regs[1]->flags & IR3_REG_SSA); /* because get_unconst() */
1649 cond = instr->regs[1]->instr;
1650
1651 /* meta:flow tmp0 */
1652 instr = instr_create(ctx, -1, OPC_META_FLOW);
1653 ir3_reg_create(instr, 0, 0); /* dummy dst */
1654 add_src_reg(ctx, instr, tmp_src, TGSI_SWIZZLE_X);
1655
1656 push_branch(ctx, false, instr, cond);
1657 instr->flow.if_block = push_block(ctx);
1658 }
1659
1660 static void
1661 trans_else(const struct instr_translater *t,
1662 struct ir3_compile_context *ctx,
1663 struct tgsi_full_instruction *inst)
1664 {
1665 struct ir3_instruction *instr;
1666
1667 pop_block(ctx);
1668
1669 instr = pop_branch(ctx);
1670
1671 compile_assert(ctx, (instr->category == -1) &&
1672 (instr->opc == OPC_META_FLOW));
1673
1674 push_branch(ctx, true, instr, NULL);
1675 instr->flow.else_block = push_block(ctx);
1676 }
1677
1678 static struct ir3_instruction *
1679 find_temporary(struct ir3_block *block, unsigned n)
1680 {
1681 if (block->parent && !block->temporaries[n])
1682 return find_temporary(block->parent, n);
1683 return block->temporaries[n];
1684 }
1685
1686 static struct ir3_instruction *
1687 find_output(struct ir3_block *block, unsigned n)
1688 {
1689 if (block->parent && !block->outputs[n])
1690 return find_output(block->parent, n);
1691 return block->outputs[n];
1692 }
1693
1694 static struct ir3_instruction *
1695 create_phi(struct ir3_compile_context *ctx, struct ir3_instruction *cond,
1696 struct ir3_instruction *a, struct ir3_instruction *b)
1697 {
1698 struct ir3_instruction *phi;
1699
1700 compile_assert(ctx, cond);
1701
1702 /* Either side of the condition could be null.. which
1703 * indicates a variable written on only one side of the
1704 * branch. Normally this should only be variables not
1705 * used outside of that side of the branch. So we could
1706 * just 'return a ? a : b;' in that case. But for better
1707 * defined undefined behavior we just stick in imm{0.0}.
1708 * In the common case of a value only used within the
1709 * one side of the branch, the PHI instruction will not
1710 * get scheduled
1711 */
1712 if (!a)
1713 a = create_immed(ctx, 0.0);
1714 if (!b)
1715 b = create_immed(ctx, 0.0);
1716
1717 phi = instr_create(ctx, -1, OPC_META_PHI);
1718 ir3_reg_create(phi, 0, 0); /* dummy dst */
1719 ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = cond;
1720 ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = a;
1721 ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = b;
1722
1723 return phi;
1724 }
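
/* Sketch of the result: for "if (c) TEMP[0].x = a; else TEMP[0].x = b;"
 * trans_endif() below ends up with roughly
 *
 *   phi = PHI(flow, out_if(a), out_else(b))
 *
 * where 'flow' is the meta:flow instruction carrying the condition and
 * the outputs are the per-block meta-output wrappers; the phi then
 * becomes the enclosing block's value of TEMP[0].x.
 */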
1725
1726 static void
1727 trans_endif(const struct instr_translater *t,
1728 struct ir3_compile_context *ctx,
1729 struct tgsi_full_instruction *inst)
1730 {
1731 struct ir3_instruction *instr;
1732 struct ir3_block *ifb, *elseb;
1733 struct ir3_instruction **ifout, **elseout;
1734 unsigned i, ifnout = 0, elsenout = 0;
1735
1736 pop_block(ctx);
1737
1738 instr = pop_branch(ctx);
1739
1740 compile_assert(ctx, (instr->category == -1) &&
1741 (instr->opc == OPC_META_FLOW));
1742
1743 ifb = instr->flow.if_block;
1744 elseb = instr->flow.else_block;
1745 /* if there is no else block, the parent block is used for the
1746 * branch-not-taken src of the PHI instructions:
1747 */
1748 if (!elseb)
1749 elseb = ifb->parent;
1750
1751 /* worst case sizes: */
1752 ifnout = ifb->ntemporaries + ifb->noutputs;
1753 elsenout = elseb->ntemporaries + elseb->noutputs;
1754
1755 ifout = ir3_alloc(ctx->ir, sizeof(ifb->outputs[0]) * ifnout);
1756 if (elseb != ifb->parent)
1757 elseout = ir3_alloc(ctx->ir, sizeof(ifb->outputs[0]) * elsenout);
1758
1759 ifnout = 0;
1760 elsenout = 0;
1761
1762 /* generate PHI instructions for any temporaries written: */
1763 for (i = 0; i < ifb->ntemporaries; i++) {
1764 struct ir3_instruction *a = ifb->temporaries[i];
1765 struct ir3_instruction *b = elseb->temporaries[i];
1766
1767 /* if temporary written in if-block, or if else block
1768 * is present and temporary written in else-block:
1769 */
1770 if (a || ((elseb != ifb->parent) && b)) {
1771 struct ir3_instruction *phi;
1772
1773 /* if only written on one side, find the closest
1774 * enclosing update on other side:
1775 */
1776 if (!a)
1777 a = find_temporary(ifb, i);
1778 if (!b)
1779 b = find_temporary(elseb, i);
1780
1781 ifout[ifnout] = a;
1782 a = create_output(ifb, a, ifnout++);
1783
1784 if (elseb != ifb->parent) {
1785 elseout[elsenout] = b;
1786 b = create_output(elseb, b, elsenout++);
1787 }
1788
1789 phi = create_phi(ctx, instr, a, b);
1790 ctx->block->temporaries[i] = phi;
1791 }
1792 }
1793
1794 compile_assert(ctx, ifb->noutputs == elseb->noutputs);
1795
1796 /* .. and any outputs written: */
1797 for (i = 0; i < ifb->noutputs; i++) {
1798 struct ir3_instruction *a = ifb->outputs[i];
1799 struct ir3_instruction *b = elseb->outputs[i];
1800
1801 /* if output written in if-block, or if else block
1802 * is present and output written in else-block:
1803 */
1804 if (a || ((elseb != ifb->parent) && b)) {
1805 struct ir3_instruction *phi;
1806
1807 /* if only written on one side, find the closest
1808 * enclosing update on other side:
1809 */
1810 if (!a)
1811 a = find_output(ifb, i);
1812 if (!b)
1813 b = find_output(elseb, i);
1814
1815 ifout[ifnout] = a;
1816 a = create_output(ifb, a, ifnout++);
1817
1818 if (elseb != ifb->parent) {
1819 elseout[elsenout] = b;
1820 b = create_output(elseb, b, elsenout++);
1821 }
1822
1823 phi = create_phi(ctx, instr, a, b);
1824 ctx->block->outputs[i] = phi;
1825 }
1826 }
1827
1828 ifb->noutputs = ifnout;
1829 ifb->outputs = ifout;
1830
1831 if (elseb != ifb->parent) {
1832 elseb->noutputs = elsenout;
1833 elseb->outputs = elseout;
1834 }
1835
1836 // TODO maybe we want to compact block->inputs?
1837 }
1838
1839 /*
1840 * Kill
1841 */
1842
1843 static void
1844 trans_kill(const struct instr_translater *t,
1845 struct ir3_compile_context *ctx,
1846 struct tgsi_full_instruction *inst)
1847 {
1848 struct ir3_instruction *instr, *immed, *cond = NULL;
1849 bool inv = false;
1850
1851 switch (t->tgsi_opc) {
1852 case TGSI_OPCODE_KILL:
1853 /* unconditional kill, use enclosing if condition: */
1854 if (ctx->branch_count > 0) {
1855 unsigned int idx = ctx->branch_count - 1;
1856 cond = ctx->branch[idx].cond;
1857 inv = ctx->branch[idx].inv;
1858 } else {
1859 cond = create_immed(ctx, 1.0);
1860 }
1861
1862 break;
1863 }
1864
1865 compile_assert(ctx, cond);
1866
1867 immed = create_immed(ctx, 0.0);
1868
1869 /* cmps.f.ne p0.x, cond, {0.0} */
1870 instr = instr_create(ctx, 2, OPC_CMPS_F);
1871 instr->cat2.condition = IR3_COND_NE;
1872 ir3_reg_create(instr, regid(REG_P0, 0), 0);
1873 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
1874 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = immed;
1875 cond = instr;
1876
1877 /* kill p0.x */
1878 instr = instr_create(ctx, 0, OPC_KILL);
1879 instr->cat0.inv = inv;
1880 ir3_reg_create(instr, 0, 0); /* dummy dst */
1881 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
1882
1883 ctx->kill[ctx->kill_count++] = instr;
1884 }
1885
1886 /*
1887 * Kill-If
1888 */
1889
1890 static void
1891 trans_killif(const struct instr_translater *t,
1892 struct ir3_compile_context *ctx,
1893 struct tgsi_full_instruction *inst)
1894 {
1895 struct tgsi_src_register *src = &inst->Src[0].Register;
1896 struct ir3_instruction *instr, *immed, *cond = NULL;
1897 bool inv = false;
1898
1899 immed = create_immed(ctx, 0.0);
1900
1901 /* cmps.f.ne p0.x, cond, {0.0} */
1902 instr = instr_create(ctx, 2, OPC_CMPS_F);
1903 instr->cat2.condition = IR3_COND_NE;
1904 ir3_reg_create(instr, regid(REG_P0, 0), 0);
1905 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = immed;
1906 add_src_reg(ctx, instr, src, src->SwizzleX);
1907
1908 cond = instr;
1909
1910 /* kill p0.x */
1911 instr = instr_create(ctx, 0, OPC_KILL);
1912 instr->cat0.inv = inv;
1913 ir3_reg_create(instr, 0, 0); /* dummy dst */
1914 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
1915
1916 ctx->kill[ctx->kill_count++] = instr;
1917 }
1918
1919 /*
1920 * I2F / U2F / F2I / F2U
1921 */
1922
1923 static void
1924 trans_cov(const struct instr_translater *t,
1925 struct ir3_compile_context *ctx,
1926 struct tgsi_full_instruction *inst)
1927 {
1928 struct ir3_instruction *instr;
1929 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1930 struct tgsi_src_register *src = &inst->Src[0].Register;
1931
1932 /* cov.<src_type><dst_type> dst, src */
1933 instr = instr_create(ctx, 1, 0);
1934 switch (t->tgsi_opc) {
1935 case TGSI_OPCODE_U2F:
1936 instr->cat1.src_type = TYPE_U32;
1937 instr->cat1.dst_type = TYPE_F32;
1938 break;
1939 case TGSI_OPCODE_I2F:
1940 instr->cat1.src_type = TYPE_S32;
1941 instr->cat1.dst_type = TYPE_F32;
1942 break;
1943 case TGSI_OPCODE_F2U:
1944 instr->cat1.src_type = TYPE_F32;
1945 instr->cat1.dst_type = TYPE_U32;
1946 break;
1947 case TGSI_OPCODE_F2I:
1948 instr->cat1.src_type = TYPE_F32;
1949 instr->cat1.dst_type = TYPE_S32;
1950 break;
1952 }
1953 vectorize(ctx, instr, dst, 1, src, 0);
1954 put_dst(ctx, inst, dst);
1955 }
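
/* For reference, the conversions this produces (a cat1 cov, with the
 * mnemonic written cov.<src_type><dst_type>):
 *
 *   U2F: cov.u32f32     I2F: cov.s32f32
 *   F2U: cov.f32u32     F2I: cov.f32s32
 */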
1956
1957 /*
1958 * UMUL
1959 *
1960 * There is no 32-bit multiply instruction, so we split a and b into 16-bit
1961 * halves (a = (ah << 16) + al, likewise for b); the low 32 bits of a * b:
1962 *
1963 * dst = (al * bl) + ((ah * bl) << 16) + ((al * bh) << 16)
1964 *
1965 * mull.u tmp0, a, b (mul low, i.e. al * bl)
1966 * madsh.m16 tmp1, a, b, tmp0 (mul-add shift high mix, i.e. ah * bl << 16)
1967 * madsh.m16 dst, b, a, tmp1 (i.e. al * bh << 16)
1968 */
1969 static void
1970 trans_umul(const struct instr_translater *t,
1971 struct ir3_compile_context *ctx,
1972 struct tgsi_full_instruction *inst)
1973 {
1974 struct ir3_instruction *instr;
1975 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1976 struct tgsi_src_register *a = &inst->Src[0].Register;
1977 struct tgsi_src_register *b = &inst->Src[1].Register;
1978
1979 struct tgsi_dst_register tmp0_dst, tmp1_dst;
1980 struct tgsi_src_register *tmp0_src, *tmp1_src;
1981
1982 tmp0_src = get_internal_temp(ctx, &tmp0_dst);
1983 tmp1_src = get_internal_temp(ctx, &tmp1_dst);
1984
1985 if (is_rel_or_const(a))
1986 a = get_unconst(ctx, a);
1987 if (is_rel_or_const(b))
1988 b = get_unconst(ctx, b);
1989
1990 /* mull.u tmp0, a, b */
1991 instr = instr_create(ctx, 2, OPC_MULL_U);
1992 vectorize(ctx, instr, &tmp0_dst, 2, a, 0, b, 0);
1993
1994 /* madsh.m16 tmp1, a, b, tmp0 */
1995 instr = instr_create(ctx, 3, OPC_MADSH_M16);
1996 vectorize(ctx, instr, &tmp1_dst, 3, a, 0, b, 0, tmp0_src, 0);
1997
1998 /* madsh.m16 dst, b, a, tmp1 */
1999 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2000 vectorize(ctx, instr, dst, 3, b, 0, a, 0, tmp1_src, 0);
2001 put_dst(ctx, inst, dst);
2002 }
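
/* Illustrative reference model (not part of the compiler) of the expansion
 * above: the low 32 bits of a 32x32-bit multiply built from 16-bit halves,
 * mirroring the mull.u/madsh.m16/madsh.m16 sequence. Assumes uint32_t is
 * in scope (e.g. from <stdint.h>).
 */
static inline uint32_t
umul32_lo_ref(uint32_t a, uint32_t b)
{
	uint32_t al = a & 0xffff, ah = a >> 16;
	uint32_t bl = b & 0xffff, bh = b >> 16;
	uint32_t dst = al * bl;         /* mull.u    tmp0, a, b       */
	dst += (ah * bl) << 16;         /* madsh.m16 tmp1, a, b, tmp0 */
	dst += (al * bh) << 16;         /* madsh.m16 dst,  b, a, tmp1 */
	return dst;
}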
2003
2004 /*
2005 * Handlers for TGSI instructions which do have 1:1 mapping to native
2006 * instructions:
2007 */
2008
2009 static void
2010 instr_cat0(const struct instr_translater *t,
2011 struct ir3_compile_context *ctx,
2012 struct tgsi_full_instruction *inst)
2013 {
2014 instr_create(ctx, 0, t->opc);
2015 }
2016
2017 static void
2018 instr_cat1(const struct instr_translater *t,
2019 struct ir3_compile_context *ctx,
2020 struct tgsi_full_instruction *inst)
2021 {
2022 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2023 struct tgsi_src_register *src = &inst->Src[0].Register;
2024 create_mov(ctx, dst, src);
2025 put_dst(ctx, inst, dst);
2026 }
2027
2028 static void
2029 instr_cat2(const struct instr_translater *t,
2030 struct ir3_compile_context *ctx,
2031 struct tgsi_full_instruction *inst)
2032 {
2033 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2034 struct tgsi_src_register *src0 = &inst->Src[0].Register;
2035 struct tgsi_src_register *src1 = &inst->Src[1].Register;
2036 struct ir3_instruction *instr;
2037 unsigned src0_flags = 0, src1_flags = 0;
2038
2039 switch (t->tgsi_opc) {
2040 case TGSI_OPCODE_ABS:
2041 case TGSI_OPCODE_IABS:
2042 src0_flags = IR3_REG_ABS;
2043 break;
2044 case TGSI_OPCODE_INEG:
2045 src0_flags = IR3_REG_NEGATE;
2046 break;
2047 case TGSI_OPCODE_SUB:
2048 src1_flags = IR3_REG_NEGATE;
2049 break;
2050 }
2051
2052 switch (t->opc) {
2053 case OPC_ABSNEG_F:
2054 case OPC_ABSNEG_S:
2055 case OPC_CLZ_B:
2056 case OPC_CLZ_S:
2057 case OPC_SIGN_F:
2058 case OPC_FLOOR_F:
2059 case OPC_CEIL_F:
2060 case OPC_RNDNE_F:
2061 case OPC_RNDAZ_F:
2062 case OPC_TRUNC_F:
2063 case OPC_NOT_B:
2064 case OPC_BFREV_B:
2065 case OPC_SETRM:
2066 case OPC_CBITS_B:
2067 /* these only have one src reg */
2068 instr = instr_create(ctx, 2, t->opc);
2069 vectorize(ctx, instr, dst, 1, src0, src0_flags);
2070 break;
2071 default:
2072 if (is_const(src0) && is_const(src1))
2073 src0 = get_unconst(ctx, src0);
2074
2075 instr = instr_create(ctx, 2, t->opc);
2076 vectorize(ctx, instr, dst, 2, src0, src0_flags,
2077 src1, src1_flags);
2078 break;
2079 }
2080
2081 put_dst(ctx, inst, dst);
2082 }
2083
2084 static void
2085 instr_cat3(const struct instr_translater *t,
2086 struct ir3_compile_context *ctx,
2087 struct tgsi_full_instruction *inst)
2088 {
2089 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2090 struct tgsi_src_register *src0 = &inst->Src[0].Register;
2091 struct tgsi_src_register *src1 = &inst->Src[1].Register;
2092 struct ir3_instruction *instr;
2093
2094 /* in particular, can't handle const for src1 for cat3..
2095 * for mad, we can swap first two src's if needed:
2096 */
2097 if (is_rel_or_const(src1)) {
2098 if (is_mad(t->opc) && !is_rel_or_const(src0)) {
2099 struct tgsi_src_register *tmp;
2100 tmp = src0;
2101 src0 = src1;
2102 src1 = tmp;
2103 } else {
2104 src1 = get_unconst(ctx, src1);
2105 }
2106 }
2107
2108 instr = instr_create(ctx, 3, t->opc);
2109 vectorize(ctx, instr, dst, 3, src0, 0, src1, 0,
2110 &inst->Src[2].Register, 0);
2111 put_dst(ctx, inst, dst);
2112 }
2113
2114 static void
2115 instr_cat4(const struct instr_translater *t,
2116 struct ir3_compile_context *ctx,
2117 struct tgsi_full_instruction *inst)
2118 {
2119 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2120 struct tgsi_src_register *src = &inst->Src[0].Register;
2121 struct ir3_instruction *instr;
2122 unsigned i;
2123
2124 /* seems like blob compiler avoids const as src.. */
2125 if (is_const(src))
2126 src = get_unconst(ctx, src);
2127
2128 /* we need to replicate into each component: */
2129 for (i = 0; i < 4; i++) {
2130 if (dst->WriteMask & (1 << i)) {
2131 instr = instr_create(ctx, 4, t->opc);
2132 add_dst_reg(ctx, instr, dst, i);
2133 add_src_reg(ctx, instr, src, src->SwizzleX);
2134 }
2135 }
2136
2137 put_dst(ctx, inst, dst);
2138 }
2139
2140 static const struct instr_translater translaters[TGSI_OPCODE_LAST] = {
2141 #define INSTR(n, f, ...) \
2142 [TGSI_OPCODE_ ## n] = { .fxn = (f), .tgsi_opc = TGSI_OPCODE_ ## n, ##__VA_ARGS__ }
2143
2144 INSTR(MOV, instr_cat1),
2145 INSTR(RCP, instr_cat4, .opc = OPC_RCP),
2146 INSTR(RSQ, instr_cat4, .opc = OPC_RSQ),
2147 INSTR(SQRT, instr_cat4, .opc = OPC_SQRT),
2148 INSTR(MUL, instr_cat2, .opc = OPC_MUL_F),
2149 INSTR(ADD, instr_cat2, .opc = OPC_ADD_F),
2150 INSTR(SUB, instr_cat2, .opc = OPC_ADD_F),
2151 INSTR(MIN, instr_cat2, .opc = OPC_MIN_F),
2152 INSTR(MAX, instr_cat2, .opc = OPC_MAX_F),
2153 INSTR(UADD, instr_cat2, .opc = OPC_ADD_U),
2154 INSTR(IMIN, instr_cat2, .opc = OPC_MIN_S),
2155 INSTR(UMIN, instr_cat2, .opc = OPC_MIN_U),
2156 INSTR(IMAX, instr_cat2, .opc = OPC_MAX_S),
2157 INSTR(UMAX, instr_cat2, .opc = OPC_MAX_U),
2158 INSTR(AND, instr_cat2, .opc = OPC_AND_B),
2159 INSTR(OR, instr_cat2, .opc = OPC_OR_B),
2160 INSTR(NOT, instr_cat2, .opc = OPC_NOT_B),
2161 INSTR(XOR, instr_cat2, .opc = OPC_XOR_B),
2162 INSTR(UMUL, trans_umul),
2163 INSTR(SHL, instr_cat2, .opc = OPC_SHL_B),
2164 INSTR(USHR, instr_cat2, .opc = OPC_SHR_B),
2165 INSTR(ISHR, instr_cat2, .opc = OPC_ASHR_B),
2166 INSTR(IABS, instr_cat2, .opc = OPC_ABSNEG_S),
2167 INSTR(INEG, instr_cat2, .opc = OPC_ABSNEG_S),
2169 INSTR(MAD, instr_cat3, .opc = OPC_MAD_F32, .hopc = OPC_MAD_F16),
2170 INSTR(TRUNC, instr_cat2, .opc = OPC_TRUNC_F),
2171 INSTR(CLAMP, trans_clamp),
2172 INSTR(FLR, instr_cat2, .opc = OPC_FLOOR_F),
2173 INSTR(ROUND, instr_cat2, .opc = OPC_RNDNE_F),
2174 INSTR(SSG, instr_cat2, .opc = OPC_SIGN_F),
2175 INSTR(CEIL, instr_cat2, .opc = OPC_CEIL_F),
2176 INSTR(ARL, trans_arl),
2177 INSTR(UARL, trans_arl),
2178 INSTR(EX2, instr_cat4, .opc = OPC_EXP2),
2179 INSTR(LG2, instr_cat4, .opc = OPC_LOG2),
2180 INSTR(ABS, instr_cat2, .opc = OPC_ABSNEG_F),
2181 INSTR(COS, instr_cat4, .opc = OPC_COS),
2182 INSTR(SIN, instr_cat4, .opc = OPC_SIN),
2183 INSTR(TEX, trans_samp, .opc = OPC_SAM, .arg = TGSI_OPCODE_TEX),
2184 INSTR(TXP, trans_samp, .opc = OPC_SAM, .arg = TGSI_OPCODE_TXP),
2185 INSTR(TXB, trans_samp, .opc = OPC_SAMB, .arg = TGSI_OPCODE_TXB),
2186 INSTR(TXL, trans_samp, .opc = OPC_SAML, .arg = TGSI_OPCODE_TXL),
2187 INSTR(DDX, trans_deriv, .opc = OPC_DSX),
2188 INSTR(DDY, trans_deriv, .opc = OPC_DSY),
2189 INSTR(SGT, trans_cmp),
2190 INSTR(SLT, trans_cmp),
2191 INSTR(FSLT, trans_cmp),
2192 INSTR(SGE, trans_cmp),
2193 INSTR(FSGE, trans_cmp),
2194 INSTR(SLE, trans_cmp),
2195 INSTR(SNE, trans_cmp),
2196 INSTR(FSNE, trans_cmp),
2197 INSTR(SEQ, trans_cmp),
2198 INSTR(FSEQ, trans_cmp),
2199 INSTR(CMP, trans_cmp),
2200 INSTR(USNE, trans_icmp, .opc = OPC_CMPS_U),
2201 INSTR(USEQ, trans_icmp, .opc = OPC_CMPS_U),
2202 INSTR(ISGE, trans_icmp, .opc = OPC_CMPS_S),
2203 INSTR(USGE, trans_icmp, .opc = OPC_CMPS_U),
2204 INSTR(ISLT, trans_icmp, .opc = OPC_CMPS_S),
2205 INSTR(USLT, trans_icmp, .opc = OPC_CMPS_U),
2206 INSTR(UCMP, trans_ucmp),
2207 INSTR(IF, trans_if, .opc = OPC_CMPS_F),
2208 INSTR(UIF, trans_if, .opc = OPC_CMPS_U),
2209 INSTR(ELSE, trans_else),
2210 INSTR(ENDIF, trans_endif),
2211 INSTR(END, instr_cat0, .opc = OPC_END),
2212 INSTR(KILL, trans_kill, .opc = OPC_KILL),
2213 INSTR(KILL_IF, trans_killif, .opc = OPC_KILL),
2214 INSTR(I2F, trans_cov),
2215 INSTR(U2F, trans_cov),
2216 INSTR(F2I, trans_cov),
2217 INSTR(F2U, trans_cov),
2218 };
2219
2220 static ir3_semantic
2221 decl_semantic(const struct tgsi_declaration_semantic *sem)
2222 {
2223 return ir3_semantic_name(sem->Name, sem->Index);
2224 }
2225
2226 static struct ir3_instruction *
2227 decl_in_frag_bary(struct ir3_compile_context *ctx, unsigned regid,
2228 unsigned j, unsigned inloc)
2229 {
2230 struct ir3_instruction *instr;
2231 struct ir3_register *src;
2232
2233 /* bary.f dst, #inloc, r0.x */
2234 instr = instr_create(ctx, 2, OPC_BARY_F);
2235 ir3_reg_create(instr, regid, 0); /* dummy dst */
2236 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = inloc;
2237 src = ir3_reg_create(instr, 0, IR3_REG_SSA);
2238 src->wrmask = 0x3;
2239 src->instr = ctx->frag_pos;
2240
2241 return instr;
2242 }
2243
2244 /* TGSI_SEMANTIC_POSITION
2245 * """"""""""""""""""""""
2246 *
2247 * For fragment shaders, TGSI_SEMANTIC_POSITION is used to indicate that
2248 * fragment shader input contains the fragment's window position. The X
2249 * component starts at zero and always increases from left to right.
2250 * The Y component starts at zero and always increases but Y=0 may either
2251 * indicate the top of the window or the bottom depending on the fragment
2252 * coordinate origin convention (see TGSI_PROPERTY_FS_COORD_ORIGIN).
2253 * The Z coordinate ranges from 0 to 1 to represent depth from the front
2254 * to the back of the Z buffer. The W component contains the reciprocal
2255 * of the interpolated vertex position W component.
2256 */
2257 static struct ir3_instruction *
2258 decl_in_frag_coord(struct ir3_compile_context *ctx, unsigned regid,
2259 unsigned j)
2260 {
2261 struct ir3_instruction *instr, *src;
2262
2263 compile_assert(ctx, !ctx->frag_coord[j]);
2264
2265 ctx->frag_coord[j] = create_input(ctx->block, NULL, 0);
2266
2268 switch (j) {
2269 case 0: /* .x */
2270 case 1: /* .y */
2271 /* for frag_coord, we get unsigned values.. we need
2272 * to subtract (integer) 8 and divide by 16 (right-
2273 * shift by 4) then convert to float:
2274 */
2275
2276 /* add.s tmp, src, -8 */
2277 instr = instr_create(ctx, 2, OPC_ADD_S);
2278 ir3_reg_create(instr, regid, 0); /* dummy dst */
2279 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->frag_coord[j];
2280 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = -8;
2281 src = instr;
2282
2283 /* shr.b tmp, tmp, 4 */
2284 instr = instr_create(ctx, 2, OPC_SHR_B);
2285 ir3_reg_create(instr, regid, 0); /* dummy dst */
2286 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2287 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 4;
2288 src = instr;
2289
2290 /* mov.u32f32 dst, tmp */
2291 instr = instr_create(ctx, 1, 0);
2292 instr->cat1.src_type = TYPE_U32;
2293 instr->cat1.dst_type = TYPE_F32;
2294 ir3_reg_create(instr, regid, 0); /* dummy dst */
2295 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2296
2297 break;
2298 case 2: /* .z */
2299 case 3: /* .w */
2300 /* seems that we can use these as-is: */
2301 instr = ctx->frag_coord[j];
2302 break;
2303 default:
2304 compile_error(ctx, "invalid channel\n");
2305 instr = create_immed(ctx, 0.0);
2306 break;
2307 }
2308
2309 return instr;
2310 }
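
/* Reference for the .x/.y path above (illustrative; assumes the raw value
 * is an unsigned fixed-point sample position with 4 fractional bits):
 *
 *   xf = (float)((raw - 8) >> 4);
 */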
2311
2312 /* TGSI_SEMANTIC_FACE
2313 * """"""""""""""""""
2314 *
2315 * This label applies to fragment shader inputs only and indicates that
2316 * the register contains front/back-face information of the form (F, 0,
2317 * 0, 1). The first component will be positive when the fragment belongs
2318 * to a front-facing polygon, and negative when the fragment belongs to a
2319 * back-facing polygon.
2320 */
2321 static struct ir3_instruction *
2322 decl_in_frag_face(struct ir3_compile_context *ctx, unsigned regid,
2323 unsigned j)
2324 {
2325 struct ir3_instruction *instr, *src;
2326
2327 switch (j) {
2328 case 0: /* .x */
2329 compile_assert(ctx, !ctx->frag_face);
2330
2331 ctx->frag_face = create_input(ctx->block, NULL, 0);
2332
2333 /* for faceness, we always get -1 or 0 (int).. but TGSI expects
2334 * positive vs negative float.. and piglit further seems to
2335 * expect -1.0 or 1.0:
2336 *
2337 * mul.s tmp, hr0.x, 2
2338 * add.s tmp, tmp, 1
2339 * mov.s16f32 dst, tmp
2340 *
2341 */
2342
2343 instr = instr_create(ctx, 2, OPC_MUL_S);
2344 ir3_reg_create(instr, regid, 0); /* dummy dst */
2345 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->frag_face;
2346 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 2;
2347 src = instr;
2348
2349 instr = instr_create(ctx, 2, OPC_ADD_S);
2350 ir3_reg_create(instr, regid, 0); /* dummy dst */
2351 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2352 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 1;
2353 src = instr;
2354
2355 instr = instr_create(ctx, 1, 0); /* mov */
2356 instr->cat1.src_type = TYPE_S32;
2357 instr->cat1.dst_type = TYPE_F32;
2358 ir3_reg_create(instr, regid, 0); /* dummy dst */
2359 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2360
2361 break;
2362 case 1: /* .y */
2363 case 2: /* .z */
2364 instr = create_immed(ctx, 0.0);
2365 break;
2366 case 3: /* .w */
2367 instr = create_immed(ctx, 1.0);
2368 break;
2369 default:
2370 compile_error(ctx, "invalid channel\n");
2371 instr = create_immed(ctx, 0.0);
2372 break;
2373 }
2374
2375 return instr;
2376 }
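
/* Reference for the sequence above (illustrative): raw integer faceness
 * f in {-1, 0} maps to
 *
 *   (float)(f * 2 + 1)  ->  -1.0 for f == -1, 1.0 for f == 0
 *
 * which satisfies TGSI's positive/negative front/back convention.
 */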
2377
2378 static void
2379 decl_in(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
2380 {
2381 struct ir3_shader_variant *so = ctx->so;
2382 unsigned name = decl->Semantic.Name;
2383 unsigned i;
2384
2385 /* I don't think we should get frag shader input without
2386 * semantic info? Otherwise how do inputs get linked to
2387 * vert outputs?
2388 */
2389 compile_assert(ctx, (ctx->type == TGSI_PROCESSOR_VERTEX) ||
2390 decl->Declaration.Semantic);
2391
2392 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
2393 unsigned n = so->inputs_count++;
2394 unsigned r = regid(i, 0);
2395 unsigned ncomp, j;
2396
2397 /* we'll figure out the actual components used after scheduling */
2398 ncomp = 4;
2399
2400 DBG("decl in -> r%d", i);
2401
2402 compile_assert(ctx, n < ARRAY_SIZE(so->inputs));
2403
2404 so->inputs[n].semantic = decl_semantic(&decl->Semantic);
2405 so->inputs[n].compmask = (1 << ncomp) - 1;
2406 so->inputs[n].regid = r;
2407 so->inputs[n].inloc = ctx->next_inloc;
2408
2409 for (j = 0; j < ncomp; j++) {
2410 struct ir3_instruction *instr = NULL;
2411
2412 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
2413 /* for fragment shaders, POSITION and FACE are handled
2414 * specially, not using normal varying / bary.f
2415 */
2416 if (name == TGSI_SEMANTIC_POSITION) {
2417 so->inputs[n].bary = false;
2418 so->frag_coord = true;
2419 instr = decl_in_frag_coord(ctx, r + j, j);
2420 } else if (name == TGSI_SEMANTIC_FACE) {
2421 so->inputs[n].bary = false;
2422 so->frag_face = true;
2423 instr = decl_in_frag_face(ctx, r + j, j);
2424 } else {
2425 so->inputs[n].bary = true;
2426 instr = decl_in_frag_bary(ctx, r + j, j,
2427 so->inputs[n].inloc + j - 8);
2428 }
2429 } else {
2430 instr = create_input(ctx->block, NULL, (i * 4) + j);
2431 }
2432
2433 ctx->block->inputs[(i * 4) + j] = instr;
2434 }
2435
2436 if (so->inputs[n].bary || (ctx->type == TGSI_PROCESSOR_VERTEX)) {
2437 ctx->next_inloc += ncomp;
2438 so->total_in += ncomp;
2439 }
2440 }
2441 }
2442
2443 static void
2444 decl_out(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
2445 {
2446 struct ir3_shader_variant *so = ctx->so;
2447 unsigned comp = 0;
2448 unsigned name = decl->Semantic.Name;
2449 unsigned i;
2450
2451 compile_assert(ctx, decl->Declaration.Semantic);
2452
2453 DBG("decl out[%d] -> r%d", name, decl->Range.First);
2454
2455 if (ctx->type == TGSI_PROCESSOR_VERTEX) {
2456 switch (name) {
2457 case TGSI_SEMANTIC_POSITION:
2458 so->writes_pos = true;
2459 break;
2460 case TGSI_SEMANTIC_PSIZE:
2461 so->writes_psize = true;
2462 break;
2463 case TGSI_SEMANTIC_COLOR:
2464 case TGSI_SEMANTIC_BCOLOR:
2465 case TGSI_SEMANTIC_GENERIC:
2466 case TGSI_SEMANTIC_FOG:
2467 case TGSI_SEMANTIC_TEXCOORD:
2468 break;
2469 default:
2470 compile_error(ctx, "unknown VS semantic name: %s\n",
2471 tgsi_semantic_names[name]);
2472 }
2473 } else {
2474 switch (name) {
2475 case TGSI_SEMANTIC_POSITION:
2476 comp = 2; /* tgsi will write to .z component */
2477 so->writes_pos = true;
2478 break;
2479 case TGSI_SEMANTIC_COLOR:
2480 break;
2481 default:
2482 compile_error(ctx, "unknown FS semantic name: %s\n",
2483 tgsi_semantic_names[name]);
2484 }
2485 }
2486
2487 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
2488 unsigned n = so->outputs_count++;
2489 unsigned ncomp, j;
2490
2491 ncomp = 4;
2492
2493 compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
2494
2495 so->outputs[n].semantic = decl_semantic(&decl->Semantic);
2496 so->outputs[n].regid = regid(i, comp);
2497
2498 /* avoid undefined outputs: stick in a dummy mov from imm{0.0},
2499 * which will be over-written if the output is actually
2500 * assigned
2501 */
2502 for (j = 0; j < ncomp; j++)
2503 ctx->block->outputs[(i * 4) + j] = create_immed(ctx, 0.0);
2504 }
2505 }
2506
2507 /* from TGSI perspective, we actually have inputs. But most of the "inputs"
2508 * for a fragment shader are just bary.f instructions. The *actual* inputs
2509 * from the hw perspective are the frag_pos and optionally frag_coord and
2510 * frag_face.
2511 */
2512 static void
2513 fixup_frag_inputs(struct ir3_compile_context *ctx)
2514 {
2515 struct ir3_shader_variant *so = ctx->so;
2516 struct ir3_block *block = ctx->block;
2517 struct ir3_instruction **inputs;
2518 struct ir3_instruction *instr;
2519 int n, regid = 0;
2520
2521 block->ninputs = 0;
2522
2523 n = 4; /* always have frag_pos */
2524 n += COND(so->frag_face, 4);
2525 n += COND(so->frag_coord, 4);
2526
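/* sketch of the resulting inputs[] layout when both frag_face and
 * frag_coord are present (groups padded to vec4; final register numbers
 * are assigned below and in RA):
 *
 *   inputs[0..3]: frag_face (hr0.x), NULL, NULL, NULL
 *   inputs[4..7]: frag_coord.x/y/z/w (r0.x..r0.w)
 *   inputs[8..9]: frag_pos.x/y (base for bary.f varying fetch)
 */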
2527 inputs = ir3_alloc(ctx->ir, n * (sizeof(struct ir3_instruction *)));
2528
2529 if (so->frag_face) {
2530 /* this ultimately gets assigned to hr0.x so doesn't conflict
2531 * with frag_coord/frag_pos..
2532 */
2533 inputs[block->ninputs++] = ctx->frag_face;
2534 ctx->frag_face->regs[0]->num = 0;
2535
2536 /* remaining channels not used, but let's avoid confusing
2537 * other parts that expect inputs to come in groups of vec4
2538 */
2539 inputs[block->ninputs++] = NULL;
2540 inputs[block->ninputs++] = NULL;
2541 inputs[block->ninputs++] = NULL;
2542 }
2543
2544 /* since we don't know where to set the regid for frag_coord,
2545 * we have to use r0.x for it. But we don't want to *always*
2546 * use r1.x for frag_pos as that could increase the register
2547 * footprint on simple shaders:
2548 */
2549 if (so->frag_coord) {
2550 ctx->frag_coord[0]->regs[0]->num = regid++;
2551 ctx->frag_coord[1]->regs[0]->num = regid++;
2552 ctx->frag_coord[2]->regs[0]->num = regid++;
2553 ctx->frag_coord[3]->regs[0]->num = regid++;
2554
2555 inputs[block->ninputs++] = ctx->frag_coord[0];
2556 inputs[block->ninputs++] = ctx->frag_coord[1];
2557 inputs[block->ninputs++] = ctx->frag_coord[2];
2558 inputs[block->ninputs++] = ctx->frag_coord[3];
2559 }
2560
2561 /* we always have frag_pos: */
2562 so->pos_regid = regid;
2563
2564 /* r0.x */
2565 instr = create_input(block, NULL, block->ninputs);
2566 instr->regs[0]->num = regid++;
2567 inputs[block->ninputs++] = instr;
2568 ctx->frag_pos->regs[1]->instr = instr;
2569
2570 /* r0.y */
2571 instr = create_input(block, NULL, block->ninputs);
2572 instr->regs[0]->num = regid++;
2573 inputs[block->ninputs++] = instr;
2574 ctx->frag_pos->regs[2]->instr = instr;
2575
2576 block->inputs = inputs;
2577 }
2578
2579 static void
2580 compile_instructions(struct ir3_compile_context *ctx)
2581 {
2582 push_block(ctx);
2583
2584 /* for fragment shader, we have a single input register (usually
2585 * r0.xy) which is used as the base for bary.f varying fetch instrs:
2586 */
2587 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
2588 struct ir3_instruction *instr;
2589 instr = ir3_instr_create(ctx->block, -1, OPC_META_FI);
2590 ir3_reg_create(instr, 0, 0);
2591 ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.x */
2592 ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.y */
2593 ctx->frag_pos = instr;
2594 }
2595
2596 while (!tgsi_parse_end_of_tokens(&ctx->parser)) {
2597 tgsi_parse_token(&ctx->parser);
2598
2599 switch (ctx->parser.FullToken.Token.Type) {
2600 case TGSI_TOKEN_TYPE_DECLARATION: {
2601 struct tgsi_full_declaration *decl =
2602 &ctx->parser.FullToken.FullDeclaration;
2603 if (decl->Declaration.File == TGSI_FILE_OUTPUT) {
2604 decl_out(ctx, decl);
2605 } else if (decl->Declaration.File == TGSI_FILE_INPUT) {
2606 decl_in(ctx, decl);
2607 }
2608 break;
2609 }
2610 case TGSI_TOKEN_TYPE_IMMEDIATE: {
2611 /* TODO: if we know the immediate is small enough, and only
2612 * used with instructions that can embed an immediate, we
2613 * can skip this:
2614 */
2615 struct tgsi_full_immediate *imm =
2616 &ctx->parser.FullToken.FullImmediate;
2617 unsigned n = ctx->so->immediates_count++;
2618 compile_assert(ctx, n < ARRAY_SIZE(ctx->so->immediates));
2619 memcpy(ctx->so->immediates[n].val, imm->u, 16);
2620 break;
2621 }
2622 case TGSI_TOKEN_TYPE_INSTRUCTION: {
2623 struct tgsi_full_instruction *inst =
2624 &ctx->parser.FullToken.FullInstruction;
2625 unsigned opc = inst->Instruction.Opcode;
2626 const struct instr_translater *t = &translaters[opc];
2627
2628 if (t->fxn) {
2629 t->fxn(t, ctx, inst);
2630 ctx->num_internal_temps = 0;
2631
2632 compile_assert(ctx, !ctx->using_tmp_dst);
2633 } else {
2634 compile_error(ctx, "unknown TGSI opc: %s\n",
2635 tgsi_get_opcode_name(opc));
2636 }
2637
2638 switch (inst->Instruction.Saturate) {
2639 case TGSI_SAT_ZERO_ONE:
2640 create_clamp_imm(ctx, &inst->Dst[0].Register,
2641 fui(0.0), fui(1.0));
2642 break;
2643 case TGSI_SAT_MINUS_PLUS_ONE:
2644 create_clamp_imm(ctx, &inst->Dst[0].Register,
2645 fui(-1.0), fui(1.0));
2646 break;
2647 }
2648
2649 instr_finish(ctx);
2650
2651 break;
2652 }
2653 default:
2654 break;
2655 }
2656 }
2657 }
2658
2659 static void
2660 compile_dump(struct ir3_compile_context *ctx)
2661 {
2662 const char *name = (ctx->so->type == SHADER_VERTEX) ? "vert" : "frag";
2663 static unsigned n = 0;
2664 char fname[16];
2665 FILE *f;
2666 snprintf(fname, sizeof(fname), "%s-%04u.dot", name, n++);
2667 f = fopen(fname, "w");
2668 if (!f)
2669 return;
2670 ir3_block_depth(ctx->block);
2671 ir3_dump(ctx->ir, name, ctx->block, f);
2672 fclose(f);
2673 }
2674
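/* Top-level entry point. After TGSI translation the block is run through,
 * in order: flatten (converting if/else to selects where possible), copy
 * propagation (if requested), depth sorting, scheduling, and register
 * allocation; FD_DBG_OPTDUMP/FD_DBG_OPTMSGS control the dump points below.
 */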
2675 int
2676 ir3_compile_shader(struct ir3_shader_variant *so,
2677 const struct tgsi_token *tokens, struct ir3_shader_key key,
2678 bool cp)
2679 {
2680 struct ir3_compile_context ctx;
2681 struct ir3_block *block;
2682 struct ir3_instruction **inputs;
2683 unsigned i, j, actual_in;
2684 int ret = 0;
2685
2686 assert(!so->ir);
2687
2688 so->ir = ir3_create();
2689
2690 assert(so->ir);
2691
2692 if (compile_init(&ctx, so, tokens) != TGSI_PARSE_OK) {
2693 DBG("INIT failed!");
2694 ret = -1;
2695 goto out;
2696 }
2697
2698 compile_instructions(&ctx);
2699
2700 block = ctx.block;
2701
2702 /* keep track of the inputs from TGSI perspective.. */
2703 inputs = block->inputs;
2704
2705 /* but fixup actual inputs for frag shader: */
2706 if (ctx.type == TGSI_PROCESSOR_FRAGMENT)
2707 fixup_frag_inputs(&ctx);
2708
2709 /* at this point, for binning pass, throw away unneeded outputs: */
2710 if (key.binning_pass) {
2711 for (i = 0, j = 0; i < so->outputs_count; i++) {
2712 unsigned name = sem2name(so->outputs[i].semantic);
2713 unsigned idx = sem2idx(so->outputs[i].semantic);
2714
2715 /* throw away everything but first position/psize */
2716 if ((idx == 0) && ((name == TGSI_SEMANTIC_POSITION) ||
2717 (name == TGSI_SEMANTIC_PSIZE))) {
2718 if (i != j) {
2719 so->outputs[j] = so->outputs[i];
2720 block->outputs[(j*4)+0] = block->outputs[(i*4)+0];
2721 block->outputs[(j*4)+1] = block->outputs[(i*4)+1];
2722 block->outputs[(j*4)+2] = block->outputs[(i*4)+2];
2723 block->outputs[(j*4)+3] = block->outputs[(i*4)+3];
2724 }
2725 j++;
2726 }
2727 }
2728 so->outputs_count = j;
2729 block->noutputs = j * 4;
2730 }
2731
2732 /* for rendering to alpha format, we only need the .w component,
2733 * and we need it to be in the .x position:
2734 */
2735 if (key.alpha) {
2736 for (i = 0, j = 0; i < so->outputs_count; i++) {
2737 unsigned name = sem2name(so->outputs[i].semantic);
2738
2739 /* move .w component to .x and discard others: */
2740 if (name == TGSI_SEMANTIC_COLOR) {
2741 block->outputs[(i*4)+0] = block->outputs[(i*4)+3];
2742 block->outputs[(i*4)+1] = NULL;
2743 block->outputs[(i*4)+2] = NULL;
2744 block->outputs[(i*4)+3] = NULL;
2745 }
2746 }
2747 }
2748
2749 /* at this point, we want the kill's in the outputs array too,
2750 * so that they get scheduled (since they have no dst).. we've
2751 * already ensured that the array is big enough in push_block():
2752 */
2753 if (ctx.type == TGSI_PROCESSOR_FRAGMENT) {
2754 for (i = 0; i < ctx.kill_count; i++)
2755 block->outputs[block->noutputs++] = ctx.kill[i];
2756 }
2757
2758 if (fd_mesa_debug & FD_DBG_OPTDUMP)
2759 compile_dump(&ctx);
2760
2761 ret = ir3_block_flatten(block);
2762 if (ret < 0) {
2763 DBG("FLATTEN failed!");
2764 goto out;
2765 }
2766 if ((ret > 0) && (fd_mesa_debug & FD_DBG_OPTDUMP))
2767 compile_dump(&ctx);
2768
2769 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2770 printf("BEFORE CP:\n");
2771 ir3_dump_instr_list(block->head);
2772 }
2773
2774 if (cp)
2775 ir3_block_cp(block);
2776
2777 if (fd_mesa_debug & FD_DBG_OPTDUMP)
2778 compile_dump(&ctx);
2779
2780 ir3_block_depth(block);
2781
2782 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2783 printf("AFTER DEPTH:\n");
2784 ir3_dump_instr_list(block->head);
2785 }
2786
2787 ret = ir3_block_sched(block);
2788 if (ret) {
2789 DBG("SCHED failed!");
2790 goto out;
2791 }
2792
2793 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2794 printf("AFTER SCHED:\n");
2795 ir3_dump_instr_list(block->head);
2796 }
2797
2798 ret = ir3_block_ra(block, so->type, key.half_precision,
2799 so->frag_coord, so->frag_face, &so->has_samp);
2800 if (ret) {
2801 DBG("RA failed!");
2802 goto out;
2803 }
2804
2805 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2806 printf("AFTER RA:\n");
2807 ir3_dump_instr_list(block->head);
2808 }
2809
2810 /* fixup input/outputs: */
2811 for (i = 0; i < so->outputs_count; i++) {
2812 so->outputs[i].regid = block->outputs[i*4]->regs[0]->num;
2813 /* preserve hack for depth output.. tgsi writes depth to .z,
2814 * but what we give the hw is the scalar register:
2815 */
2816 if ((ctx.type == TGSI_PROCESSOR_FRAGMENT) &&
2817 (sem2name(so->outputs[i].semantic) == TGSI_SEMANTIC_POSITION))
2818 so->outputs[i].regid += 2;
2819 }
2820 /* Note that some or all channels of an input may be unused: */
2821 actual_in = 0;
2822 for (i = 0; i < so->inputs_count; i++) {
2823 unsigned j, regid = ~0, compmask = 0;
2824 so->inputs[i].ncomp = 0;
2825 for (j = 0; j < 4; j++) {
2826 struct ir3_instruction *in = inputs[(i*4) + j];
2827 if (in) {
2828 compmask |= (1 << j);
2829 regid = in->regs[0]->num - j;
2830 actual_in++;
2831 so->inputs[i].ncomp++;
2832 }
2833 }
2834 so->inputs[i].regid = regid;
2835 so->inputs[i].compmask = compmask;
2836 }
2837
2838 /* the fragment shader always gets full vec4's even if it doesn't
2839 * fetch all components, but for the vertex shader we need to update
2840 * total_in with the actual number of components fetched, otherwise
2841 * things will hang due to a mismatch between the VFD_DECODE's and
2842 * TOTALATTRTOVS
2843 */
2844 if (so->type == SHADER_VERTEX)
2845 so->total_in = actual_in;
2846
2847 out:
2848 if (ret) {
2849 ir3_destroy(so->ir);
2850 so->ir = NULL;
2851 }
2852 compile_free(&ctx);
2853
2854 return ret;
2855 }