freedreno/a3xx/compiler: rename ir3_shader to ir3
[mesa.git] / src / gallium / drivers / freedreno / a3xx / fd3_compiler_old.c
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2
3 /*
4 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 * Authors:
26 * Rob Clark <robclark@freedesktop.org>
27 */
28
29 #include <stdarg.h>
30
31 #include "pipe/p_state.h"
32 #include "util/u_string.h"
33 #include "util/u_memory.h"
34 #include "util/u_inlines.h"
35 #include "tgsi/tgsi_parse.h"
36 #include "tgsi/tgsi_ureg.h"
37 #include "tgsi/tgsi_info.h"
38 #include "tgsi/tgsi_strings.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_scan.h"
41
42 #include "freedreno_lowering.h"
43
44 #include "fd3_compiler.h"
45 #include "fd3_program.h"
46 #include "fd3_util.h"
47
48 #include "instr-a3xx.h"
49 #include "ir3.h"
50
51
/* Per-shader compile state, threaded through all translation helpers
 * while converting one TGSI shader into ir3 instructions.
 */
struct fd3_compile_context {
	const struct tgsi_token *tokens;
	/* true when tokens came from the lowering pass and must be free'd: */
	bool free_tokens;
	struct ir3 *ir;
	struct ir3_block *block;
	struct fd3_shader_variant *so;

	struct tgsi_parse_context parser;
	/* processor type (vertex/fragment) from the TGSI header: */
	unsigned type;

	struct tgsi_shader_info info;

	/* last input dst (for setting (ei) flag): */
	struct ir3_register *last_input;

	/* last instruction with relative addressing: */
	struct ir3_instruction *last_rel;

	/* for calculating input/output positions/linkages: */
	unsigned next_inloc;

	unsigned num_internal_temps;
	struct tgsi_src_register internal_temps[6];

	/* track registers which need to synchronize w/ "complex alu" cat3
	 * instruction pipeline:
	 */
	regmask_t needs_ss;

	/* track registers which need to synchronize with texture fetch
	 * pipeline:
	 */
	regmask_t needs_sy;

	/* inputs start at r0, temporaries start after last input, and
	 * outputs start after last temporary.
	 *
	 * We could be more clever, because this is not a hw restriction,
	 * but probably best just to implement an optimizing pass to
	 * reduce the # of registers used and get rid of redundant mov's
	 * (to output register).
	 */
	unsigned base_reg[TGSI_FILE_COUNT];

	/* idx/slot for last compiler generated immediate */
	unsigned immediate_idx;

	/* stack of branch instructions that start (potentially nested)
	 * branches, so that we can fix up the branch target on the
	 * corresponding END instruction
	 */
	struct ir3_instruction *branch[16];
	unsigned int branch_count;

	/* used when dst is same as one of the src, to avoid overwriting a
	 * src element before the remaining scalar instructions that make
	 * up the vector operation
	 */
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
};
114
115
/* forward declarations (defined further below): */
static void vectorize(struct fd3_compile_context *ctx,
		struct ir3_instruction *instr, struct tgsi_dst_register *dst,
		int nsrcs, ...);
static void create_mov(struct fd3_compile_context *ctx,
		struct tgsi_dst_register *dst, struct tgsi_src_register *src);
121
/* Initialize compile state for one shader variant: run the generic TGSI
 * lowering pass, lay out the (virtual) register files, and start the
 * TGSI parser.  Returns TGSI_PARSE_OK on success, otherwise the error
 * code from tgsi_parse_init().
 */
static unsigned
compile_init(struct fd3_compile_context *ctx, struct fd3_shader_variant *so,
		const struct tgsi_token *tokens)
{
	unsigned ret, base = 0;
	struct tgsi_shader_info *info = &ctx->info;
	/* request lowering for everything a3xx has no native instruction for: */
	const struct fd_lowering_config lconfig = {
			.color_two_side = so->key.color_two_side,
			.lower_DST = true,
			.lower_XPD = true,
			.lower_SCS = true,
			.lower_LRP = true,
			.lower_FRC = true,
			.lower_POW = true,
			.lower_LIT = true,
			.lower_EXP = true,
			.lower_LOG = true,
			.lower_DP4 = true,
			.lower_DP3 = true,
			.lower_DPH = true,
			.lower_DP2 = true,
			.lower_DP2A = true,
	};

	ctx->tokens = fd_transform_lowering(&lconfig, tokens, &ctx->info);
	/* fd_transform_lowering() returns NULL when nothing needed lowering;
	 * remember whether we own the token array (freed in compile_free()):
	 */
	ctx->free_tokens = !!ctx->tokens;
	if (!ctx->tokens) {
		/* no lowering */
		ctx->tokens = tokens;
	}
	ctx->ir = so->ir;
	ctx->block = ir3_block_create(ctx->ir, 0, 0, 0);
	ctx->so = so;
	ctx->last_input = NULL;
	ctx->last_rel = NULL;
	ctx->next_inloc = 8;
	ctx->num_internal_temps = 0;
	ctx->branch_count = 0;

	regmask_init(&ctx->needs_ss);
	regmask_init(&ctx->needs_sy);
	memset(ctx->base_reg, 0, sizeof(ctx->base_reg));

	/* Immediates go after constants: */
	ctx->base_reg[TGSI_FILE_CONSTANT] = 0;
	ctx->base_reg[TGSI_FILE_IMMEDIATE] =
			info->file_max[TGSI_FILE_CONSTANT] + 1;

	/* if full precision and fragment shader, don't clobber
	 * r0.x w/ bary fetch:
	 */
	if ((so->type == SHADER_FRAGMENT) && !so->key.half_precision)
		base = 1;

	/* Temporaries after outputs after inputs: */
	ctx->base_reg[TGSI_FILE_INPUT] = base;
	ctx->base_reg[TGSI_FILE_OUTPUT] = base +
			info->file_max[TGSI_FILE_INPUT] + 1;
	ctx->base_reg[TGSI_FILE_TEMPORARY] = base +
			info->file_max[TGSI_FILE_INPUT] + 1 +
			info->file_max[TGSI_FILE_OUTPUT] + 1;

	so->first_immediate = ctx->base_reg[TGSI_FILE_IMMEDIATE];
	/* immediate_idx counts scalar slots (4 per vec4 immediate): */
	ctx->immediate_idx = 4 * (ctx->info.file_max[TGSI_FILE_IMMEDIATE] + 1);

	ret = tgsi_parse_init(&ctx->parser, ctx->tokens);
	if (ret != TGSI_PARSE_OK)
		return ret;

	ctx->type = ctx->parser.FullHeader.Processor.Processor;

	return ret;
}
195
196 static void
197 compile_error(struct fd3_compile_context *ctx, const char *format, ...)
198 {
199 va_list ap;
200 va_start(ap, format);
201 _debug_vprintf(format, ap);
202 va_end(ap);
203 tgsi_dump(ctx->tokens, 0);
204 debug_assert(0);
205 }
206
/* assert which also dumps the shader via compile_error() on failure: */
#define compile_assert(ctx, cond) do { \
		if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
	} while (0)
210
/* Release resources acquired in compile_init(): the lowered token
 * array (if we own it) and the TGSI parser state.
 */
static void
compile_free(struct fd3_compile_context *ctx)
{
	if (ctx->free_tokens)
		free((void *)ctx->tokens);
	tgsi_parse_free(&ctx->parser);
}
218
/* Table entry mapping one TGSI opcode to its translation handler. */
struct instr_translater {
	void (*fxn)(const struct instr_translater *t,
			struct fd3_compile_context *ctx,
			struct tgsi_full_instruction *inst);
	unsigned tgsi_opc;
	opc_t opc;
	opc_t hopc;    /* opc to use for half_precision mode, if different */
	unsigned arg;  /* extra handler-specific argument (e.g. TGSI opcode) */
};
228
229 static void
230 handle_last_rel(struct fd3_compile_context *ctx)
231 {
232 if (ctx->last_rel) {
233 ctx->last_rel->flags |= IR3_INSTR_UL;
234 ctx->last_rel = NULL;
235 }
236 }
237
/* Convenience wrapper: append a new instruction to the current block. */
static struct ir3_instruction *
instr_create(struct fd3_compile_context *ctx, int category, opc_t opc)
{
	return ir3_instr_create(ctx->block, category, opc);
}
243
244 static void
245 add_nop(struct fd3_compile_context *ctx, unsigned count)
246 {
247 while (count-- > 0)
248 instr_create(ctx, 0, OPC_NOP);
249 }
250
251 static unsigned
252 src_flags(struct fd3_compile_context *ctx, struct ir3_register *reg)
253 {
254 unsigned flags = 0;
255
256 if (reg->flags & (IR3_REG_CONST | IR3_REG_IMMED))
257 return flags;
258
259 if (regmask_get(&ctx->needs_ss, reg)) {
260 flags |= IR3_INSTR_SS;
261 regmask_init(&ctx->needs_ss);
262 }
263
264 if (regmask_get(&ctx->needs_sy, reg)) {
265 flags |= IR3_INSTR_SY;
266 regmask_init(&ctx->needs_sy);
267 }
268
269 return flags;
270 }
271
272 static struct ir3_register *
273 add_dst_reg(struct fd3_compile_context *ctx, struct ir3_instruction *instr,
274 const struct tgsi_dst_register *dst, unsigned chan)
275 {
276 unsigned flags = 0, num = 0;
277 struct ir3_register *reg;
278
279 switch (dst->File) {
280 case TGSI_FILE_OUTPUT:
281 case TGSI_FILE_TEMPORARY:
282 num = dst->Index + ctx->base_reg[dst->File];
283 break;
284 case TGSI_FILE_ADDRESS:
285 num = REG_A0;
286 break;
287 default:
288 compile_error(ctx, "unsupported dst register file: %s\n",
289 tgsi_file_name(dst->File));
290 break;
291 }
292
293 if (dst->Indirect)
294 flags |= IR3_REG_RELATIV;
295 if (ctx->so->key.half_precision)
296 flags |= IR3_REG_HALF;
297
298 reg = ir3_reg_create(instr, regid(num, chan), flags);
299
300 if (dst->Indirect)
301 ctx->last_rel = instr;
302
303 return reg;
304 }
305
306 static struct ir3_register *
307 add_src_reg(struct fd3_compile_context *ctx, struct ir3_instruction *instr,
308 const struct tgsi_src_register *src, unsigned chan)
309 {
310 unsigned flags = 0, num = 0;
311 struct ir3_register *reg;
312
313 /* TODO we need to use a mov to temp for const >= 64.. or maybe
314 * we could use relative addressing..
315 */
316 compile_assert(ctx, src->Index < 64);
317
318 switch (src->File) {
319 case TGSI_FILE_IMMEDIATE:
320 /* TODO if possible, use actual immediate instead of const.. but
321 * TGSI has vec4 immediates, we can only embed scalar (of limited
322 * size, depending on instruction..)
323 */
324 case TGSI_FILE_CONSTANT:
325 flags |= IR3_REG_CONST;
326 num = src->Index + ctx->base_reg[src->File];
327 break;
328 case TGSI_FILE_OUTPUT:
329 /* NOTE: we should only end up w/ OUTPUT file for things like
330 * clamp()'ing saturated dst instructions
331 */
332 case TGSI_FILE_INPUT:
333 case TGSI_FILE_TEMPORARY:
334 num = src->Index + ctx->base_reg[src->File];
335 break;
336 default:
337 compile_error(ctx, "unsupported src register file: %s\n",
338 tgsi_file_name(src->File));
339 break;
340 }
341
342 if (src->Absolute)
343 flags |= IR3_REG_ABS;
344 if (src->Negate)
345 flags |= IR3_REG_NEGATE;
346 if (src->Indirect)
347 flags |= IR3_REG_RELATIV;
348 if (ctx->so->key.half_precision)
349 flags |= IR3_REG_HALF;
350
351 reg = ir3_reg_create(instr, regid(num, chan), flags);
352
353 if (src->Indirect)
354 ctx->last_rel = instr;
355
356 instr->flags |= src_flags(ctx, reg);
357
358 return reg;
359 }
360
361 static void
362 src_from_dst(struct tgsi_src_register *src, struct tgsi_dst_register *dst)
363 {
364 src->File = dst->File;
365 src->Indirect = dst->Indirect;
366 src->Dimension = dst->Dimension;
367 src->Index = dst->Index;
368 src->Absolute = 0;
369 src->Negate = 0;
370 src->SwizzleX = TGSI_SWIZZLE_X;
371 src->SwizzleY = TGSI_SWIZZLE_Y;
372 src->SwizzleZ = TGSI_SWIZZLE_Z;
373 src->SwizzleW = TGSI_SWIZZLE_W;
374 }
375
376 /* Get internal-temp src/dst to use for a sequence of instructions
377 * generated by a single TGSI op.
378 */
379 static struct tgsi_src_register *
380 get_internal_temp(struct fd3_compile_context *ctx,
381 struct tgsi_dst_register *tmp_dst)
382 {
383 struct tgsi_src_register *tmp_src;
384 int n;
385
386 tmp_dst->File = TGSI_FILE_TEMPORARY;
387 tmp_dst->WriteMask = TGSI_WRITEMASK_XYZW;
388 tmp_dst->Indirect = 0;
389 tmp_dst->Dimension = 0;
390
391 /* assign next temporary: */
392 n = ctx->num_internal_temps++;
393 compile_assert(ctx, n < ARRAY_SIZE(ctx->internal_temps));
394 tmp_src = &ctx->internal_temps[n];
395
396 tmp_dst->Index = ctx->info.file_max[TGSI_FILE_TEMPORARY] + n + 1;
397
398 src_from_dst(tmp_src, tmp_dst);
399
400 return tmp_src;
401 }
402
403 /* Get internal half-precision temp src/dst to use for a sequence of
404 * instructions generated by a single TGSI op.
405 */
406 static struct tgsi_src_register *
407 get_internal_temp_hr(struct fd3_compile_context *ctx,
408 struct tgsi_dst_register *tmp_dst)
409 {
410 struct tgsi_src_register *tmp_src;
411 int n;
412
413 if (ctx->so->key.half_precision)
414 return get_internal_temp(ctx, tmp_dst);
415
416 tmp_dst->File = TGSI_FILE_TEMPORARY;
417 tmp_dst->WriteMask = TGSI_WRITEMASK_XYZW;
418 tmp_dst->Indirect = 0;
419 tmp_dst->Dimension = 0;
420
421 /* assign next temporary: */
422 n = ctx->num_internal_temps++;
423 compile_assert(ctx, n < ARRAY_SIZE(ctx->internal_temps));
424 tmp_src = &ctx->internal_temps[n];
425
426 /* just use hr0 because no one else should be using half-
427 * precision regs:
428 */
429 tmp_dst->Index = 0;
430
431 src_from_dst(tmp_src, tmp_dst);
432
433 return tmp_src;
434 }
435
436 static inline bool
437 is_const(struct tgsi_src_register *src)
438 {
439 return (src->File == TGSI_FILE_CONSTANT) ||
440 (src->File == TGSI_FILE_IMMEDIATE);
441 }
442
443 static inline bool
444 is_relative(struct tgsi_src_register *src)
445 {
446 return src->Indirect;
447 }
448
449 static inline bool
450 is_rel_or_const(struct tgsi_src_register *src)
451 {
452 return is_relative(src) || is_const(src);
453 }
454
455 static type_t
456 get_ftype(struct fd3_compile_context *ctx)
457 {
458 return ctx->so->key.half_precision ? TYPE_F16 : TYPE_F32;
459 }
460
461 static type_t
462 get_utype(struct fd3_compile_context *ctx)
463 {
464 return ctx->so->key.half_precision ? TYPE_U16 : TYPE_U32;
465 }
466
467 static unsigned
468 src_swiz(struct tgsi_src_register *src, int chan)
469 {
470 switch (chan) {
471 case 0: return src->SwizzleX;
472 case 1: return src->SwizzleY;
473 case 2: return src->SwizzleZ;
474 case 3: return src->SwizzleW;
475 }
476 assert(0);
477 return 0;
478 }
479
480 /* for instructions that cannot take a const register as src, if needed
481 * generate a move to temporary gpr:
482 */
483 static struct tgsi_src_register *
484 get_unconst(struct fd3_compile_context *ctx, struct tgsi_src_register *src)
485 {
486 struct tgsi_dst_register tmp_dst;
487 struct tgsi_src_register *tmp_src;
488
489 compile_assert(ctx, is_rel_or_const(src));
490
491 tmp_src = get_internal_temp(ctx, &tmp_dst);
492
493 create_mov(ctx, &tmp_dst, src);
494
495 return tmp_src;
496 }
497
/* Find (or allocate) a scalar slot in the shader's immediate table
 * holding 'val' — or '-val', in which case the returned src gets the
 * Negate modifier — and fill in 'reg' as an IMMEDIATE src broadcasting
 * that component on all four swizzle channels.
 */
static void
get_immediate(struct fd3_compile_context *ctx,
		struct tgsi_src_register *reg, uint32_t val)
{
	unsigned neg, swiz, idx, i;
	/* actually maps 1:1 currently.. not sure if that is safe to rely on: */
	static const unsigned swiz2tgsi[] = {
			TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
	};

	/* scan already-emitted immediates for an exact or negated match: */
	for (i = 0; i < ctx->immediate_idx; i++) {
		swiz = i % 4;
		idx = i / 4;

		if (ctx->so->immediates[idx].val[swiz] == val) {
			neg = 0;
			break;
		}

		if (ctx->so->immediates[idx].val[swiz] == -val) {
			neg = 1;
			break;
		}
	}

	/* NOTE: neg/swiz/idx are always assigned before use — either the
	 * loop above broke out (setting them), or i == immediate_idx and
	 * the branch below sets them.
	 */
	if (i == ctx->immediate_idx) {
		/* need to generate a new immediate: */
		swiz = i % 4;
		idx = i / 4;
		neg = 0;
		ctx->so->immediates[idx].val[swiz] = val;
		ctx->so->immediates_count = idx + 1;
		ctx->immediate_idx++;
	}

	reg->File = TGSI_FILE_IMMEDIATE;
	reg->Indirect = 0;
	reg->Dimension = 0;
	reg->Index = idx;
	reg->Absolute = 0;
	reg->Negate = neg;
	/* broadcast the matched component across all channels: */
	reg->SwizzleX = swiz2tgsi[swiz];
	reg->SwizzleY = swiz2tgsi[swiz];
	reg->SwizzleZ = swiz2tgsi[swiz];
	reg->SwizzleW = swiz2tgsi[swiz];
}
544
545 static void
546 create_mov(struct fd3_compile_context *ctx, struct tgsi_dst_register *dst,
547 struct tgsi_src_register *src)
548 {
549 type_t type_mov = get_ftype(ctx);
550 unsigned i;
551
552 for (i = 0; i < 4; i++) {
553 /* move to destination: */
554 if (dst->WriteMask & (1 << i)) {
555 struct ir3_instruction *instr;
556
557 if (src->Absolute || src->Negate) {
558 /* can't have abs or neg on a mov instr, so use
559 * absneg.f instead to handle these cases:
560 */
561 instr = instr_create(ctx, 2, OPC_ABSNEG_F);
562 } else {
563 instr = instr_create(ctx, 1, 0);
564 instr->cat1.src_type = type_mov;
565 instr->cat1.dst_type = type_mov;
566 }
567
568 add_dst_reg(ctx, instr, dst, i);
569 add_src_reg(ctx, instr, src, src_swiz(src, i));
570 } else {
571 add_nop(ctx, 1);
572 }
573 }
574 }
575
/* Emit clamp(val, minval, maxval) into dst as min(max(val, minval), maxval). */
static void
create_clamp(struct fd3_compile_context *ctx,
		struct tgsi_dst_register *dst, struct tgsi_src_register *val,
		struct tgsi_src_register *minval, struct tgsi_src_register *maxval)
{
	struct ir3_instruction *instr;

	instr = instr_create(ctx, 2, OPC_MAX_F);
	vectorize(ctx, instr, dst, 2, val, 0, minval, 0);

	instr = instr_create(ctx, 2, OPC_MIN_F);
	vectorize(ctx, instr, dst, 2, val, 0, maxval, 0);
}
589
/* Clamp dst (in place) between the immediate values minval/maxval
 * (raw 32-bit float bit patterns, e.g. fui(0.0)).
 */
static void
create_clamp_imm(struct fd3_compile_context *ctx,
		struct tgsi_dst_register *dst,
		uint32_t minval, uint32_t maxval)
{
	struct tgsi_src_register minconst, maxconst;
	struct tgsi_src_register src;

	src_from_dst(&src, dst);

	get_immediate(ctx, &minconst, minval);
	get_immediate(ctx, &maxconst, maxval);

	create_clamp(ctx, dst, &src, &minconst, &maxconst);
}
605
606 static struct tgsi_dst_register *
607 get_dst(struct fd3_compile_context *ctx, struct tgsi_full_instruction *inst)
608 {
609 struct tgsi_dst_register *dst = &inst->Dst[0].Register;
610 unsigned i;
611 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
612 struct tgsi_src_register *src = &inst->Src[i].Register;
613 if ((src->File == dst->File) && (src->Index == dst->Index)) {
614 if ((dst->WriteMask == TGSI_WRITEMASK_XYZW) &&
615 (src->SwizzleX == TGSI_SWIZZLE_X) &&
616 (src->SwizzleY == TGSI_SWIZZLE_Y) &&
617 (src->SwizzleZ == TGSI_SWIZZLE_Z) &&
618 (src->SwizzleW == TGSI_SWIZZLE_W))
619 continue;
620 ctx->tmp_src = get_internal_temp(ctx, &ctx->tmp_dst);
621 ctx->tmp_dst.WriteMask = dst->WriteMask;
622 dst = &ctx->tmp_dst;
623 break;
624 }
625 }
626 return dst;
627 }
628
/* Counterpart to get_dst(): if the write was redirected to an internal
 * temp, move the result back into the instruction's real dst.
 */
static void
put_dst(struct fd3_compile_context *ctx, struct tgsi_full_instruction *inst,
		struct tgsi_dst_register *dst)
{
	/* if necessary, add mov back into original dst: */
	if (dst != &inst->Dst[0].Register) {
		create_mov(ctx, &inst->Dst[0].Register, ctx->tmp_src);
	}
}
638
639 /* helper to generate the necessary repeat and/or additional instructions
640 * to turn a scalar instruction into a vector operation:
641 */
/* Turn the scalar instruction 'instr' into a vector operation over the
 * channels enabled in dst->WriteMask: the x-channel registers are added
 * to 'instr' itself, then the instruction is cloned once per remaining
 * enabled channel with dst/src components fixed up per-channel.
 *
 * Varargs are nsrcs (src, flags) pairs; a pair with IR3_REG_IMMED in
 * flags passes the "src" pointer's bits as an immediate value instead.
 * Trailing nops pad the sequence to a fixed 4 slots.
 */
static void
vectorize(struct fd3_compile_context *ctx, struct ir3_instruction *instr,
		struct tgsi_dst_register *dst, int nsrcs, ...)
{
	va_list ap;
	int i, j, n = 0;
	bool indirect = dst->Indirect;

	add_dst_reg(ctx, instr, dst, TGSI_SWIZZLE_X);

	va_start(ap, nsrcs);
	for (j = 0; j < nsrcs; j++) {
		struct tgsi_src_register *src =
				va_arg(ap, struct tgsi_src_register *);
		unsigned flags = va_arg(ap, unsigned);
		struct ir3_register *reg;
		if (flags & IR3_REG_IMMED) {
			reg = ir3_reg_create(instr, 0, IR3_REG_IMMED);
			/* this is an ugly cast.. should have put flags first! */
			reg->iim_val = *(int *)&src;
		} else {
			reg = add_src_reg(ctx, instr, src, TGSI_SWIZZLE_X);
			indirect |= src->Indirect;
		}
		/* NEGATE toggles (xor) rather than sets, so a negated src
		 * combined with a NEGATE flag cancels out:
		 */
		reg->flags |= flags & ~IR3_REG_NEGATE;
		if (flags & IR3_REG_NEGATE)
			reg->flags ^= IR3_REG_NEGATE;
	}
	va_end(ap);

	for (i = 0; i < 4; i++) {
		if (dst->WriteMask & (1 << i)) {
			struct ir3_instruction *cur;

			if (n++ == 0) {
				cur = instr;
			} else {
				/* clones must not repeat one-shot sync/jump flags: */
				cur = ir3_instr_clone(instr);
				cur->flags &= ~(IR3_INSTR_SY | IR3_INSTR_SS | IR3_INSTR_JP);
			}

			/* fix-up dst register component: */
			cur->regs[0]->num = regid(cur->regs[0]->num >> 2, i);

			/* fix-up src register component: */
			va_start(ap, nsrcs);
			for (j = 0; j < nsrcs; j++) {
				struct tgsi_src_register *src =
						va_arg(ap, struct tgsi_src_register *);
				unsigned flags = va_arg(ap, unsigned);
				if (!(flags & IR3_REG_IMMED)) {
					cur->regs[j+1]->num =
							regid(cur->regs[j+1]->num >> 2,
									src_swiz(src, i));
					cur->flags |= src_flags(ctx, cur->regs[j+1]);
				}
			}
			va_end(ap);

			if (indirect)
				ctx->last_rel = cur;
		}
	}

	/* pad w/ nop's.. at least until we are clever enough to
	 * figure out if we really need to..
	 */
	add_nop(ctx, 4 - n);
}
711
712 /*
713 * Handlers for TGSI instructions which do not have a 1:1 mapping to
714 * native instructions:
715 */
716
717 static void
718 trans_clamp(const struct instr_translater *t,
719 struct fd3_compile_context *ctx,
720 struct tgsi_full_instruction *inst)
721 {
722 struct tgsi_dst_register *dst = get_dst(ctx, inst);
723 struct tgsi_src_register *src0 = &inst->Src[0].Register;
724 struct tgsi_src_register *src1 = &inst->Src[1].Register;
725 struct tgsi_src_register *src2 = &inst->Src[2].Register;
726
727 create_clamp(ctx, dst, src0, src1, src2);
728
729 put_dst(ctx, inst, dst);
730 }
731
732 /* ARL(x) = x, but mova from hrN.x to a0.. */
/* ARL(x) = x, but mova from hrN.x to a0..
 *
 * Emitted sequence (nop counts satisfy hw latency requirements):
 *   cov.{f32,f16}s16 Rtmp, Rsrc   - float -> s16 convert
 *   shl.b Rtmp, Rtmp, 2           - scale to vec4 register stride
 *   mova a0, Rtmp                 - load the address register
 */
static void
trans_arl(const struct instr_translater *t,
		struct fd3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *src = &inst->Src[0].Register;
	unsigned chan = src->SwizzleX;
	compile_assert(ctx, dst->File == TGSI_FILE_ADDRESS);

	/* close out any pending use of the previous a0 value: */
	handle_last_rel(ctx);

	tmp_src = get_internal_temp_hr(ctx, &tmp_dst);

	/* cov.{f32,f16}s16 Rtmp, Rsrc */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = get_ftype(ctx);
	instr->cat1.dst_type = TYPE_S16;
	add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, src, chan);

	add_nop(ctx, 3);

	/* shl.b Rtmp, Rtmp, 2 */
	instr = instr_create(ctx, 2, OPC_SHL_B);
	add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;
	ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 2;

	add_nop(ctx, 3);

	/* mova a0, Rtmp */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = TYPE_S16;
	instr->cat1.dst_type = TYPE_S16;
	add_dst_reg(ctx, instr, dst, 0)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;

	/* need to ensure 5 instr slots before a0 is used: */
	add_nop(ctx, 6);
}
777
778 /* texture fetch/sample instructions: */
779 static void
780 trans_samp(const struct instr_translater *t,
781 struct fd3_compile_context *ctx,
782 struct tgsi_full_instruction *inst)
783 {
784 struct ir3_register *r;
785 struct ir3_instruction *instr;
786 struct tgsi_src_register *coord = &inst->Src[0].Register;
787 struct tgsi_src_register *samp = &inst->Src[1].Register;
788 unsigned tex = inst->Texture.Texture;
789 int8_t *order;
790 unsigned i, flags = 0, src_wrmask;
791 bool needs_mov = false;
792
793 switch (t->arg) {
794 case TGSI_OPCODE_TEX:
795 if (tex == TGSI_TEXTURE_2D) {
796 order = (int8_t[4]){ 0, 1, -1, -1 };
797 src_wrmask = TGSI_WRITEMASK_XY;
798 } else {
799 order = (int8_t[4]){ 0, 1, 2, -1 };
800 src_wrmask = TGSI_WRITEMASK_XYZ;
801 }
802 break;
803 case TGSI_OPCODE_TXP:
804 if (tex == TGSI_TEXTURE_2D) {
805 order = (int8_t[4]){ 0, 1, 3, -1 };
806 src_wrmask = TGSI_WRITEMASK_XYZ;
807 } else {
808 order = (int8_t[4]){ 0, 1, 2, 3 };
809 src_wrmask = TGSI_WRITEMASK_XYZW;
810 }
811 flags |= IR3_INSTR_P;
812 break;
813 default:
814 compile_assert(ctx, 0);
815 break;
816 }
817
818 if ((tex == TGSI_TEXTURE_3D) || (tex == TGSI_TEXTURE_CUBE)) {
819 add_nop(ctx, 3);
820 flags |= IR3_INSTR_3D;
821 }
822
823 /* cat5 instruction cannot seem to handle const or relative: */
824 if (is_rel_or_const(coord))
825 needs_mov = true;
826
827 /* The texture sample instructions need to coord in successive
828 * registers/components (ie. src.xy but not src.yx). And TXP
829 * needs the .w component in .z for 2D.. so in some cases we
830 * might need to emit some mov instructions to shuffle things
831 * around:
832 */
833 for (i = 1; (i < 4) && (order[i] >= 0) && !needs_mov; i++)
834 if (src_swiz(coord, i) != (src_swiz(coord, 0) + order[i]))
835 needs_mov = true;
836
837 if (needs_mov) {
838 struct tgsi_dst_register tmp_dst;
839 struct tgsi_src_register *tmp_src;
840 unsigned j;
841
842 type_t type_mov = get_ftype(ctx);
843
844 /* need to move things around: */
845 tmp_src = get_internal_temp(ctx, &tmp_dst);
846
847 for (j = 0; (j < 4) && (order[j] >= 0); j++) {
848 instr = instr_create(ctx, 1, 0);
849 instr->cat1.src_type = type_mov;
850 instr->cat1.dst_type = type_mov;
851 add_dst_reg(ctx, instr, &tmp_dst, j);
852 add_src_reg(ctx, instr, coord,
853 src_swiz(coord, order[j]));
854 }
855
856 coord = tmp_src;
857
858 add_nop(ctx, 4 - j);
859 }
860
861 instr = instr_create(ctx, 5, t->opc);
862 instr->cat5.type = get_ftype(ctx);
863 instr->cat5.samp = samp->Index;
864 instr->cat5.tex = samp->Index;
865 instr->flags |= flags;
866
867 r = add_dst_reg(ctx, instr, &inst->Dst[0].Register, 0);
868 r->wrmask = inst->Dst[0].Register.WriteMask;
869
870 add_src_reg(ctx, instr, coord, coord->SwizzleX)->wrmask = src_wrmask;
871
872 /* after add_src_reg() so we don't set (sy) on sam instr itself! */
873 regmask_set(&ctx->needs_sy, r);
874 }
875
876 /*
877 * SEQ(a,b) = (a == b) ? 1.0 : 0.0
878 * cmps.f.eq tmp0, b, a
879 * cov.u16f16 dst, tmp0
880 *
881 * SNE(a,b) = (a != b) ? 1.0 : 0.0
882 * cmps.f.eq tmp0, b, a
883 * add.s tmp0, tmp0, -1
884 * sel.f16 dst, {0.0}, tmp0, {1.0}
885 *
886 * SGE(a,b) = (a >= b) ? 1.0 : 0.0
887 * cmps.f.ge tmp0, a, b
888 * cov.u16f16 dst, tmp0
889 *
890 * SLE(a,b) = (a <= b) ? 1.0 : 0.0
891 * cmps.f.ge tmp0, b, a
892 * cov.u16f16 dst, tmp0
893 *
894 * SGT(a,b) = (a > b) ? 1.0 : 0.0
895 * cmps.f.ge tmp0, b, a
896 * add.s tmp0, tmp0, -1
897 * sel.f16 dst, {0.0}, tmp0, {1.0}
898 *
899 * SLT(a,b) = (a < b) ? 1.0 : 0.0
900 * cmps.f.ge tmp0, a, b
901 * add.s tmp0, tmp0, -1
902 * sel.f16 dst, {0.0}, tmp0, {1.0}
903 *
904 * CMP(a,b,c) = (a < 0.0) ? b : c
905 * cmps.f.ge tmp0, a, {0.0}
906 * add.s tmp0, tmp0, -1
907 * sel.f16 dst, c, tmp0, b
908 */
/* Translate the TGSI comparison ops (SEQ/SNE/SGE/SLE/SGT/SLT/CMP)
 * into cmps.f + cov/sel sequences — see the expansion table in the
 * comment block above for the exact per-opcode lowering.
 */
static void
trans_cmp(const struct instr_translater *t,
		struct fd3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_src_register constval0, constval1;
	/* final instruction for CMP() uses orig src1 and src2: */
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *a0, *a1;
	unsigned condition;

	tmp_src = get_internal_temp(ctx, &tmp_dst);

	/* pick cmps.f operand order and condition so every opcode reduces
	 * to either EQ or GE:
	 */
	switch (t->tgsi_opc) {
	case TGSI_OPCODE_SEQ:
	case TGSI_OPCODE_SNE:
		a0 = &inst->Src[1].Register;  /* b */
		a1 = &inst->Src[0].Register;  /* a */
		condition = IR3_COND_EQ;
		break;
	case TGSI_OPCODE_SGE:
	case TGSI_OPCODE_SLT:
		a0 = &inst->Src[0].Register;  /* a */
		a1 = &inst->Src[1].Register;  /* b */
		condition = IR3_COND_GE;
		break;
	case TGSI_OPCODE_SLE:
	case TGSI_OPCODE_SGT:
		a0 = &inst->Src[1].Register;  /* b */
		a1 = &inst->Src[0].Register;  /* a */
		condition = IR3_COND_GE;
		break;
	case TGSI_OPCODE_CMP:
		get_immediate(ctx, &constval0, fui(0.0));
		a0 = &inst->Src[0].Register;  /* a */
		a1 = &constval0;              /* {0.0} */
		condition = IR3_COND_GE;
		break;
	default:
		compile_assert(ctx, 0);
		return;
	}

	/* cmps.f can't take two const operands; move one to a gpr: */
	if (is_const(a0) && is_const(a1))
		a0 = get_unconst(ctx, a0);

	/* cmps.f.ge tmp, a0, a1 */
	instr = instr_create(ctx, 2, OPC_CMPS_F);
	instr->cat2.condition = condition;
	vectorize(ctx, instr, &tmp_dst, 2, a0, 0, a1, 0);

	switch (t->tgsi_opc) {
	case TGSI_OPCODE_SEQ:
	case TGSI_OPCODE_SGE:
	case TGSI_OPCODE_SLE:
		/* cov.u16f16 dst, tmp0 */
		instr = instr_create(ctx, 1, 0);
		instr->cat1.src_type = get_utype(ctx);
		instr->cat1.dst_type = get_ftype(ctx);
		vectorize(ctx, instr, dst, 1, tmp_src, 0);
		break;
	case TGSI_OPCODE_SNE:
	case TGSI_OPCODE_SGT:
	case TGSI_OPCODE_SLT:
	case TGSI_OPCODE_CMP:
		/* negated conditions: turn cmps result 1 into 0 / 0 into -1,
		 * then select between the two possible outputs with sel:
		 */
		/* add.s tmp, tmp, -1 */
		instr = instr_create(ctx, 2, OPC_ADD_S);
		vectorize(ctx, instr, &tmp_dst, 2, tmp_src, 0, -1, IR3_REG_IMMED);

		if (t->tgsi_opc == TGSI_OPCODE_CMP) {
			/* sel.{f32,f16} dst, src2, tmp, src1 */
			instr = instr_create(ctx, 3,
					ctx->so->key.half_precision ? OPC_SEL_F16 : OPC_SEL_F32);
			vectorize(ctx, instr, dst, 3,
					&inst->Src[2].Register, 0,
					tmp_src, 0,
					&inst->Src[1].Register, 0);
		} else {
			get_immediate(ctx, &constval0, fui(0.0));
			get_immediate(ctx, &constval1, fui(1.0));
			/* sel.{f32,f16} dst, {0.0}, tmp0, {1.0} */
			instr = instr_create(ctx, 3,
					ctx->so->key.half_precision ? OPC_SEL_F16 : OPC_SEL_F32);
			vectorize(ctx, instr, dst, 3,
					&constval0, 0, tmp_src, 0, &constval1, 0);
		}

		break;
	}

	put_dst(ctx, inst, dst);
}
1004
1005 /*
1006 * Conditional / Flow control
1007 */
1008
1009 static unsigned
1010 find_instruction(struct fd3_compile_context *ctx, struct ir3_instruction *instr)
1011 {
1012 unsigned i;
1013 for (i = 0; i < ctx->ir->instrs_count; i++)
1014 if (ctx->ir->instrs[i] == instr)
1015 return i;
1016 return ~0;
1017 }
1018
1019 static void
1020 push_branch(struct fd3_compile_context *ctx, struct ir3_instruction *instr)
1021 {
1022 ctx->branch[ctx->branch_count++] = instr;
1023 }
1024
1025 static void
1026 pop_branch(struct fd3_compile_context *ctx)
1027 {
1028 struct ir3_instruction *instr;
1029
1030 /* if we were clever enough, we'd patch this up after the fact,
1031 * and set (jp) flag on whatever the next instruction was, rather
1032 * than inserting an extra nop..
1033 */
1034 instr = instr_create(ctx, 0, OPC_NOP);
1035 instr->flags |= IR3_INSTR_JP;
1036
1037 /* pop the branch instruction from the stack and fix up branch target: */
1038 instr = ctx->branch[--ctx->branch_count];
1039 instr->cat0.immed = ctx->ir->instrs_count - find_instruction(ctx, instr) - 1;
1040 }
1041
1042 /* We probably don't really want to translate if/else/endif into branches..
1043 * the blob driver evaluates both legs of the if and then uses the sel
1044 * instruction to pick which sides of the branch to "keep".. but figuring
1045 * that out will take somewhat more compiler smarts. So hopefully branches
1046 * don't kill performance too badly.
1047 */
1048 static void
1049 trans_if(const struct instr_translater *t,
1050 struct fd3_compile_context *ctx,
1051 struct tgsi_full_instruction *inst)
1052 {
1053 struct ir3_instruction *instr;
1054 struct tgsi_src_register *src = &inst->Src[0].Register;
1055 struct tgsi_src_register constval;
1056
1057 get_immediate(ctx, &constval, fui(0.0));
1058
1059 if (is_const(src))
1060 src = get_unconst(ctx, src);
1061
1062 instr = instr_create(ctx, 2, OPC_CMPS_F);
1063 ir3_reg_create(instr, regid(REG_P0, 0), 0);
1064 add_src_reg(ctx, instr, src, src->SwizzleX);
1065 add_src_reg(ctx, instr, &constval, constval.SwizzleX);
1066 instr->cat2.condition = IR3_COND_EQ;
1067
1068 instr = instr_create(ctx, 0, OPC_BR);
1069 push_branch(ctx, instr);
1070 }
1071
/* ELSE: close out the IF's branch (targeting here) and open an
 * unconditional jump past the else-block, patched at ENDIF.
 */
static void
trans_else(const struct instr_translater *t,
		struct fd3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;

	/* for first half of if/else/endif, generate a jump past the else: */
	instr = instr_create(ctx, 0, OPC_JUMP);

	pop_branch(ctx);
	push_branch(ctx, instr);
}
1085
/* ENDIF: patch the pending branch/jump to land here. */
static void
trans_endif(const struct instr_translater *t,
		struct fd3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	pop_branch(ctx);
}
1093
1094 /*
1095 * Handlers for TGSI instructions which do have 1:1 mapping to native
1096 * instructions:
1097 */
1098
/* cat0 (flow control) ops with no operands map directly: */
static void
instr_cat0(const struct instr_translater *t,
		struct fd3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	instr_create(ctx, 0, t->opc);
}
1106
1107 static void
1108 instr_cat1(const struct instr_translater *t,
1109 struct fd3_compile_context *ctx,
1110 struct tgsi_full_instruction *inst)
1111 {
1112 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1113 struct tgsi_src_register *src = &inst->Src[0].Register;
1114
1115 /* mov instructions can't handle a negate on src: */
1116 if (src->Negate) {
1117 struct tgsi_src_register constval;
1118 struct ir3_instruction *instr;
1119
1120 /* since right now, we are using uniformly either TYPE_F16 or
1121 * TYPE_F32, and we don't utilize the conversion possibilities
1122 * of mov instructions, we can get away with substituting an
1123 * add.f which can handle negate. Might need to revisit this
1124 * in the future if we start supporting widening/narrowing or
1125 * conversion to/from integer..
1126 */
1127 instr = instr_create(ctx, 2, OPC_ADD_F);
1128 get_immediate(ctx, &constval, fui(0.0));
1129 vectorize(ctx, instr, dst, 2, src, 0, &constval, 0);
1130 } else {
1131 create_mov(ctx, dst, src);
1132 /* create_mov() generates vector sequence, so no vectorize() */
1133 }
1134 put_dst(ctx, inst, dst);
1135 }
1136
1137 static void
1138 instr_cat2(const struct instr_translater *t,
1139 struct fd3_compile_context *ctx,
1140 struct tgsi_full_instruction *inst)
1141 {
1142 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1143 struct tgsi_src_register *src0 = &inst->Src[0].Register;
1144 struct tgsi_src_register *src1 = &inst->Src[1].Register;
1145 struct ir3_instruction *instr;
1146 unsigned src0_flags = 0, src1_flags = 0;
1147
1148 switch (t->tgsi_opc) {
1149 case TGSI_OPCODE_ABS:
1150 src0_flags = IR3_REG_ABS;
1151 break;
1152 case TGSI_OPCODE_SUB:
1153 src1_flags = IR3_REG_NEGATE;
1154 break;
1155 }
1156
1157 switch (t->opc) {
1158 case OPC_ABSNEG_F:
1159 case OPC_ABSNEG_S:
1160 case OPC_CLZ_B:
1161 case OPC_CLZ_S:
1162 case OPC_SIGN_F:
1163 case OPC_FLOOR_F:
1164 case OPC_CEIL_F:
1165 case OPC_RNDNE_F:
1166 case OPC_RNDAZ_F:
1167 case OPC_TRUNC_F:
1168 case OPC_NOT_B:
1169 case OPC_BFREV_B:
1170 case OPC_SETRM:
1171 case OPC_CBITS_B:
1172 /* these only have one src reg */
1173 instr = instr_create(ctx, 2, t->opc);
1174 vectorize(ctx, instr, dst, 1, src0, src0_flags);
1175 break;
1176 default:
1177 if (is_const(src0) && is_const(src1))
1178 src0 = get_unconst(ctx, src0);
1179
1180 instr = instr_create(ctx, 2, t->opc);
1181 vectorize(ctx, instr, dst, 2, src0, src0_flags,
1182 src1, src1_flags);
1183 break;
1184 }
1185
1186 put_dst(ctx, inst, dst);
1187 }
1188
1189 static void
1190 instr_cat3(const struct instr_translater *t,
1191 struct fd3_compile_context *ctx,
1192 struct tgsi_full_instruction *inst)
1193 {
1194 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1195 struct tgsi_src_register *src0 = &inst->Src[0].Register;
1196 struct tgsi_src_register *src1 = &inst->Src[1].Register;
1197 struct ir3_instruction *instr;
1198
1199 /* in particular, can't handle const for src1 for cat3..
1200 * for mad, we can swap first two src's if needed:
1201 */
1202 if (is_rel_or_const(src1)) {
1203 if (is_mad(t->opc) && !is_rel_or_const(src0)) {
1204 struct tgsi_src_register *tmp;
1205 tmp = src0;
1206 src0 = src1;
1207 src1 = tmp;
1208 } else {
1209 src1 = get_unconst(ctx, src1);
1210 }
1211 }
1212
1213 instr = instr_create(ctx, 3,
1214 ctx->so->key.half_precision ? t->hopc : t->opc);
1215 vectorize(ctx, instr, dst, 3, src0, 0, src1, 0,
1216 &inst->Src[2].Register, 0);
1217 put_dst(ctx, inst, dst);
1218 }
1219
1220 static void
1221 instr_cat4(const struct instr_translater *t,
1222 struct fd3_compile_context *ctx,
1223 struct tgsi_full_instruction *inst)
1224 {
1225 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1226 struct tgsi_src_register *src = &inst->Src[0].Register;
1227 struct ir3_instruction *instr;
1228 unsigned i, n;
1229
1230 /* seems like blob compiler avoids const as src.. */
1231 if (is_const(src))
1232 src = get_unconst(ctx, src);
1233
1234 /* worst case: */
1235 add_nop(ctx, 6);
1236
1237 /* we need to replicate into each component: */
1238 for (i = 0, n = 0; i < 4; i++) {
1239 if (dst->WriteMask & (1 << i)) {
1240 if (n++)
1241 add_nop(ctx, 1);
1242 instr = instr_create(ctx, 4, t->opc);
1243 add_dst_reg(ctx, instr, dst, i);
1244 add_src_reg(ctx, instr, src, src->SwizzleX);
1245 }
1246 }
1247
1248 regmask_set(&ctx->needs_ss, instr->regs[0]);
1249 put_dst(ctx, inst, dst);
1250 }
1251
/* Dispatch table mapping TGSI opcodes to translater callbacks.  Entries
 * may carry a native opcode (.opc), a half-precision variant (.hopc),
 * and/or an extra argument (.arg) consumed by the handler.  Opcodes
 * with no entry (NULL .fxn) are reported via compile_error().
 */
static const struct instr_translater translaters[TGSI_OPCODE_LAST] = {
#define INSTR(n, f, ...) \
		[TGSI_OPCODE_ ## n] = { .fxn = (f), .tgsi_opc = TGSI_OPCODE_ ## n, ##__VA_ARGS__ }

	INSTR(MOV, instr_cat1),
	INSTR(RCP, instr_cat4, .opc = OPC_RCP),
	INSTR(RSQ, instr_cat4, .opc = OPC_RSQ),
	INSTR(SQRT, instr_cat4, .opc = OPC_SQRT),
	INSTR(MUL, instr_cat2, .opc = OPC_MUL_F),
	INSTR(ADD, instr_cat2, .opc = OPC_ADD_F),
	/* SUB is add.f with the negate folded into src1 (see instr_cat2): */
	INSTR(SUB, instr_cat2, .opc = OPC_ADD_F),
	INSTR(MIN, instr_cat2, .opc = OPC_MIN_F),
	INSTR(MAX, instr_cat2, .opc = OPC_MAX_F),
	INSTR(MAD, instr_cat3, .opc = OPC_MAD_F32, .hopc = OPC_MAD_F16),
	INSTR(TRUNC, instr_cat2, .opc = OPC_TRUNC_F),
	INSTR(CLAMP, trans_clamp),
	INSTR(FLR, instr_cat2, .opc = OPC_FLOOR_F),
	INSTR(ROUND, instr_cat2, .opc = OPC_RNDNE_F),
	INSTR(SSG, instr_cat2, .opc = OPC_SIGN_F),
	INSTR(ARL, trans_arl),
	INSTR(EX2, instr_cat4, .opc = OPC_EXP2),
	INSTR(LG2, instr_cat4, .opc = OPC_LOG2),
	INSTR(ABS, instr_cat2, .opc = OPC_ABSNEG_F),
	INSTR(COS, instr_cat4, .opc = OPC_COS),
	INSTR(SIN, instr_cat4, .opc = OPC_SIN),
	/* texture sampling; .arg distinguishes TEX vs TXP handling: */
	INSTR(TEX, trans_samp, .opc = OPC_SAM, .arg = TGSI_OPCODE_TEX),
	INSTR(TXP, trans_samp, .opc = OPC_SAM, .arg = TGSI_OPCODE_TXP),
	/* comparison/select ops share a single handler: */
	INSTR(SGT, trans_cmp),
	INSTR(SLT, trans_cmp),
	INSTR(SGE, trans_cmp),
	INSTR(SLE, trans_cmp),
	INSTR(SNE, trans_cmp),
	INSTR(SEQ, trans_cmp),
	INSTR(CMP, trans_cmp),
	INSTR(IF, trans_if),
	INSTR(ELSE, trans_else),
	INSTR(ENDIF, trans_endif),
	INSTR(END, instr_cat0, .opc = OPC_END),
	INSTR(KILL, instr_cat0, .opc = OPC_KILL),
};
1292
/* pack a TGSI semantic name+index pair into the driver's compact
 * fd3_semantic value used for VS-output / FS-input linkage:
 */
static fd3_semantic
decl_semantic(const struct tgsi_declaration_semantic *sem)
{
	return fd3_semantic_name(sem->Name, sem->Index);
}
1298
/* Handle a TGSI input declaration: record each input in the shader
 * variant's input table, and for fragment shaders also emit the bary.f
 * instructions that fetch the interpolated values into registers.
 *
 * Returns the number of nop's the caller should insert before the next
 * instruction (presumably to cover bary.f latency — TODO confirm);
 * zero for vertex shaders.
 */
static int
decl_in(struct fd3_compile_context *ctx, struct tgsi_full_declaration *decl)
{
	struct fd3_shader_variant *so = ctx->so;
	unsigned base = ctx->base_reg[TGSI_FILE_INPUT];
	unsigned i, flags = 0;
	int nop = 0;

	/* I don't think we should get frag shader input without
	 * semantic info?  Otherwise how do inputs get linked to
	 * vert outputs?
	 */
	compile_assert(ctx, (ctx->type == TGSI_PROCESSOR_VERTEX) ||
			decl->Declaration.Semantic);

	if (ctx->so->key.half_precision)
		flags |= IR3_REG_HALF;

	/* a declaration may cover a range of consecutive input slots: */
	for (i = decl->Range.First; i <= decl->Range.Last; i++) {
		unsigned n = so->inputs_count++;
		unsigned r = regid(i + base, 0);
		unsigned ncomp;

		/* TODO use ctx->info.input_usage_mask[decl->Range.n] to figure out ncomp: */
		ncomp = 4;

		DBG("decl in -> r%d", i + base); // XXX

		compile_assert(ctx, n < ARRAY_SIZE(so->inputs));

		so->inputs[n].semantic = decl_semantic(&decl->Semantic);
		so->inputs[n].compmask = (1 << ncomp) - 1;
		so->inputs[n].ncomp = ncomp;
		so->inputs[n].regid = r;
		so->inputs[n].inloc = ctx->next_inloc;
		so->inputs[n].bary = true;   /* all that is supported */
		ctx->next_inloc += ncomp;

		so->total_in += ncomp;

		/* for frag shaders, we need to generate the corresponding bary instr: */
		if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
			unsigned j;

			/* one bary.f per component: */
			for (j = 0; j < ncomp; j++) {
				struct ir3_instruction *instr;
				struct ir3_register *dst;

				instr = instr_create(ctx, 2, OPC_BARY_F);

				/* dst register: */
				dst = ir3_reg_create(instr, r + j, flags);
				/* remembered so the final input can be flagged
				 * with IR3_REG_EI at the end of compilation:
				 */
				ctx->last_input = dst;

				/* input position: */
				ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val =
						so->inputs[n].inloc + j - 8;

				/* input base (always r0.xy): */
				ir3_reg_create(instr, regid(0,0), 0)->wrmask = 0x3;
			}

			nop = 6;
		}
	}

	return nop;
}
1367
1368 static void
1369 decl_out(struct fd3_compile_context *ctx, struct tgsi_full_declaration *decl)
1370 {
1371 struct fd3_shader_variant *so = ctx->so;
1372 unsigned base = ctx->base_reg[TGSI_FILE_OUTPUT];
1373 unsigned comp = 0;
1374 unsigned name = decl->Semantic.Name;
1375 unsigned i;
1376
1377 compile_assert(ctx, decl->Declaration.Semantic); // TODO is this ever not true?
1378
1379 DBG("decl out[%d] -> r%d", name, decl->Range.First + base); // XXX
1380
1381 if (ctx->type == TGSI_PROCESSOR_VERTEX) {
1382 switch (name) {
1383 case TGSI_SEMANTIC_POSITION:
1384 so->writes_pos = true;
1385 break;
1386 case TGSI_SEMANTIC_PSIZE:
1387 so->writes_psize = true;
1388 break;
1389 case TGSI_SEMANTIC_COLOR:
1390 case TGSI_SEMANTIC_BCOLOR:
1391 case TGSI_SEMANTIC_GENERIC:
1392 case TGSI_SEMANTIC_FOG:
1393 case TGSI_SEMANTIC_TEXCOORD:
1394 break;
1395 default:
1396 compile_error(ctx, "unknown VS semantic name: %s\n",
1397 tgsi_semantic_names[name]);
1398 }
1399 } else {
1400 switch (name) {
1401 case TGSI_SEMANTIC_POSITION:
1402 comp = 2; /* tgsi will write to .z component */
1403 so->writes_pos = true;
1404 break;
1405 case TGSI_SEMANTIC_COLOR:
1406 break;
1407 default:
1408 compile_error(ctx, "unknown FS semantic name: %s\n",
1409 tgsi_semantic_names[name]);
1410 }
1411 }
1412
1413 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
1414 unsigned n = so->outputs_count++;
1415 compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
1416 so->outputs[n].semantic = decl_semantic(&decl->Semantic);
1417 so->outputs[n].regid = regid(i + base, comp);
1418 }
1419 }
1420
/* Handle a TGSI sampler declaration: just note that the shader uses
 * at least one sampler.
 */
static void
decl_samp(struct fd3_compile_context *ctx, struct tgsi_full_declaration *decl)
{
	ctx->so->has_samp = true;
}
1426
/* Main translation loop: walk the TGSI token stream, dispatching
 * declarations, immediates and instructions, then apply the final
 * fixups (sync flags on the first instr, end-of-input flag, pending
 * relative-addressing state).
 */
static void
compile_instructions(struct fd3_compile_context *ctx)
{
	struct ir3 *ir = ctx->ir;
	/* nop's requested by the last input declaration, inserted before
	 * the first ALU instruction:
	 */
	int nop = 0;

	while (!tgsi_parse_end_of_tokens(&ctx->parser)) {
		tgsi_parse_token(&ctx->parser);

		switch (ctx->parser.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_DECLARATION: {
			struct tgsi_full_declaration *decl =
					&ctx->parser.FullToken.FullDeclaration;
			if (decl->Declaration.File == TGSI_FILE_OUTPUT) {
				decl_out(ctx, decl);
			} else if (decl->Declaration.File == TGSI_FILE_INPUT) {
				nop = decl_in(ctx, decl);
			} else if (decl->Declaration.File == TGSI_FILE_SAMPLER) {
				decl_samp(ctx, decl);
			}
			break;
		}
		case TGSI_TOKEN_TYPE_IMMEDIATE: {
			/* TODO: if we know the immediate is small enough, and only
			 * used with instructions that can embed an immediate, we
			 * can skip this:
			 */
			struct tgsi_full_immediate *imm =
					&ctx->parser.FullToken.FullImmediate;
			unsigned n = ctx->so->immediates_count++;
			/* NOTE(review): unlike decl_in/decl_out, there is no
			 * compile_assert() bounding n against the immediates
			 * array here — confirm the array is sized for the
			 * worst case:
			 */
			memcpy(ctx->so->immediates[n].val, imm->u, 16);
			break;
		}
		case TGSI_TOKEN_TYPE_INSTRUCTION: {
			struct tgsi_full_instruction *inst =
					&ctx->parser.FullToken.FullInstruction;
			unsigned opc = inst->Instruction.Opcode;
			const struct instr_translater *t = &translaters[opc];

			/* flush nop's owed from a preceding input decl: */
			add_nop(ctx, nop);
			nop = 0;

			if (t->fxn) {
				t->fxn(t, ctx, inst);
				/* temps used for get_unconst() etc. can be reused
				 * by the next instruction:
				 */
				ctx->num_internal_temps = 0;
			} else {
				compile_error(ctx, "unknown TGSI opc: %s\n",
						tgsi_get_opcode_name(opc));
			}

			/* implement the TGSI saturate modifier as an explicit
			 * clamp against immediates:
			 */
			switch (inst->Instruction.Saturate) {
			case TGSI_SAT_ZERO_ONE:
				create_clamp_imm(ctx, &inst->Dst[0].Register,
						fui(0.0), fui(1.0));
				break;
			case TGSI_SAT_MINUS_PLUS_ONE:
				create_clamp_imm(ctx, &inst->Dst[0].Register,
						fui(-1.0), fui(1.0));
				break;
			}

			break;
		}
		default:
			break;
		}
	}

	/* first instruction must wait on any outstanding loads/stores: */
	if (ir->instrs_count > 0)
		ir->instrs[0]->flags |= IR3_INSTR_SS | IR3_INSTR_SY;

	/* mark the last bary.f dst as end-of-input: */
	if (ctx->last_input)
		ctx->last_input->flags |= IR3_REG_EI;

	handle_last_rel(ctx);
}
1503
1504 int
1505 fd3_compile_shader_old(struct fd3_shader_variant *so,
1506 const struct tgsi_token *tokens, struct fd3_shader_key key)
1507 {
1508 struct fd3_compile_context ctx;
1509
1510 assert(!so->ir);
1511
1512 so->ir = ir3_create();
1513
1514 assert(so->ir);
1515
1516 if (compile_init(&ctx, so, tokens) != TGSI_PARSE_OK)
1517 return -1;
1518
1519 compile_instructions(&ctx);
1520
1521 compile_free(&ctx);
1522
1523 return 0;
1524 }