freedreno/ir3: add support for FS_COLOR0_WRITES_ALL_CBUFS property
mesa.git: src/gallium/drivers/freedreno/ir3/ir3_compiler.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdarg.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_strings.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_scan.h"

#include "freedreno_util.h"

#include "ir3_compiler.h"
#include "ir3_shader.h"

#include "instr-a3xx.h"
#include "ir3.h"

struct ir3_compile_context {
	const struct tgsi_token *tokens;
	bool free_tokens;
	struct ir3 *ir;
	struct ir3_shader_variant *so;
	uint16_t integer_s;

	struct ir3_block *block;
	struct ir3_instruction *current_instr;

	/* we need to defer updates to block->outputs[] until the end
	 * of an instruction (so we don't see the new value until *after*
	 * the src registers are processed)
	 */
	struct {
		struct ir3_instruction *instr, **instrp;
	} output_updates[64];
	unsigned num_output_updates;

	/* are we in a sequence of "atomic" instructions?
	 */
	bool atomic;

	/* For fragment shaders, from the hw perspective the only
	 * actual input is the r0.xy position register passed to bary.f.
	 * But TGSI doesn't know that, it still declares things as
	 * IN[] registers.  So we do all the input tracking normally
	 * and fix things up after compile_instructions()
	 *
	 * NOTE that frag_pos is the hardware position (possibly it
	 * is actually an index or tag or some such.. it is *not*
	 * a value that can be directly used for gl_FragCoord..)
	 */
	struct ir3_instruction *frag_pos, *frag_face, *frag_coord[4];

	/* For vertex shaders, keep track of the system value sources */
	struct ir3_instruction *vertex_id, *basevertex, *instance_id;

	struct tgsi_parse_context parser;
	unsigned type;

	struct tgsi_shader_info info;

	/* hmm, would be nice if tgsi_scan_shader figured this out
	 * for us:
	 */
	struct {
		unsigned first, last;
		struct ir3_instruction *fanin;
	} array[MAX_ARRAYS];
	uint32_t array_dirty;
	/* offset into array[], per file, of first array info */
	uint8_t array_offsets[TGSI_FILE_COUNT];

	/* for calculating input/output positions/linkages: */
	unsigned next_inloc;

	/* a4xx (at least patchlevel 0) cannot seem to flat-interpolate
	 * so we need to use ldlv.u32 to load the varying directly:
	 */
	bool flat_bypass;

	unsigned num_internal_temps;
	struct tgsi_src_register internal_temps[8];

	/* for looking up which system value is which */
	unsigned sysval_semantics[8];

	/* idx/slot for last compiler generated immediate */
	unsigned immediate_idx;

	/* stack of branch instructions that mark (potentially nested)
	 * branch if/else/loop/etc
	 */
	struct {
		struct ir3_instruction *instr, *cond;
		bool inv;   /* true iff in else leg of branch */
	} branch[16];
	unsigned int branch_count;

	/* list of kill instructions: */
	struct ir3_instruction *kill[16];
	unsigned int kill_count;

	/* used when dst is same as one of the src, to avoid overwriting a
	 * src element before the remaining scalar instructions that make
	 * up the vector operation
	 */
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;

	/* just for catching incorrect use of get_dst()/put_dst():
	 */
	bool using_tmp_dst;
};


static void vectorize(struct ir3_compile_context *ctx,
		struct ir3_instruction *instr, struct tgsi_dst_register *dst,
		int nsrcs, ...);
static void create_mov(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *dst, struct tgsi_src_register *src);
static type_t get_ftype(struct ir3_compile_context *ctx);

static unsigned setup_arrays(struct ir3_compile_context *ctx, unsigned file, unsigned i)
{
	/* ArrayID 0 for a given file is the legacy array spanning the entire file: */
	ctx->array[i].first = 0;
	ctx->array[i].last = ctx->info.file_max[file];
	ctx->array_offsets[file] = i;
	i += ctx->info.array_max[file] + 1;
	return i;
}
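
/* e.g. with file_max[INPUT] == 3 and no declared arrays, INPUT's legacy
 * array (ArrayID 0) lands in array[0] spanning IN[0]..IN[3], and the
 * next file's array info starts at array[1].
 */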

static unsigned
compile_init(struct ir3_compile_context *ctx, struct ir3_shader_variant *so,
		const struct tgsi_token *tokens)
{
	unsigned ret, i;
	struct tgsi_shader_info *info = &ctx->info;
	struct tgsi_lowering_config lconfig = {
			.color_two_side = so->key.color_two_side,
			.lower_DST = true,
			.lower_XPD = true,
			.lower_SCS = true,
			.lower_LRP = true,
			.lower_FRC = true,
			.lower_POW = true,
			.lower_LIT = true,
			.lower_EXP = true,
			.lower_LOG = true,
			.lower_DP4 = true,
			.lower_DP3 = true,
			.lower_DPH = true,
			.lower_DP2 = true,
			.lower_DP2A = true,
	};

	switch (so->type) {
	case SHADER_FRAGMENT:
	case SHADER_COMPUTE:
		lconfig.saturate_s = so->key.fsaturate_s;
		lconfig.saturate_t = so->key.fsaturate_t;
		lconfig.saturate_r = so->key.fsaturate_r;
		ctx->integer_s = so->key.finteger_s;
		break;
	case SHADER_VERTEX:
		lconfig.saturate_s = so->key.vsaturate_s;
		lconfig.saturate_t = so->key.vsaturate_t;
		lconfig.saturate_r = so->key.vsaturate_r;
		ctx->integer_s = so->key.vinteger_s;
		break;
	}

	if (!so->shader) {
		/* hack for standalone compiler which does not have
		 * screen/context:
		 */
	} else if (ir3_shader_gpuid(so->shader) >= 400) {
		/* a4xx seems to have *no* sam.p */
		lconfig.lower_TXP = ~0;  /* lower all txp */
		/* need special handling for "flat" */
		ctx->flat_bypass = true;
	} else {
		/* a3xx just needs to avoid sam.p for 3d tex */
		lconfig.lower_TXP = (1 << TGSI_TEXTURE_3D);
		/* no special handling for "flat" */
		ctx->flat_bypass = false;
	}

	ctx->tokens = tgsi_transform_lowering(&lconfig, tokens, &ctx->info);
	ctx->free_tokens = !!ctx->tokens;
	if (!ctx->tokens) {
		/* no lowering */
		ctx->tokens = tokens;
	}
	ctx->ir = so->ir;
	ctx->so = so;
	ctx->array_dirty = 0;
	ctx->next_inloc = 8;
	ctx->num_internal_temps = 0;
	ctx->branch_count = 0;
	ctx->kill_count = 0;
	ctx->block = NULL;
	ctx->current_instr = NULL;
	ctx->num_output_updates = 0;
	ctx->atomic = false;
	ctx->frag_pos = NULL;
	ctx->frag_face = NULL;
	ctx->vertex_id = NULL;
	ctx->instance_id = NULL;
	ctx->tmp_src = NULL;
	ctx->using_tmp_dst = false;

	memset(ctx->frag_coord, 0, sizeof(ctx->frag_coord));
	memset(ctx->array, 0, sizeof(ctx->array));
	memset(ctx->array_offsets, 0, sizeof(ctx->array_offsets));

#define FM(x) (1 << TGSI_FILE_##x)
	/* NOTE: if relative addressing is used, we set constlen in
	 * the compiler (to worst-case value) since we don't know in
	 * the assembler what the max addr reg value can be:
	 */
	if (info->indirect_files & FM(CONSTANT))
		so->constlen = ctx->info.file_max[TGSI_FILE_CONSTANT] + 1;

	i = 0;
	i += setup_arrays(ctx, TGSI_FILE_INPUT, i);
	i += setup_arrays(ctx, TGSI_FILE_TEMPORARY, i);
	i += setup_arrays(ctx, TGSI_FILE_OUTPUT, i);
	/* any others? we don't track arrays for const..*/

	/* Immediates go after constants: */
	if (so->type == SHADER_VERTEX) {
		so->first_driver_param = info->file_max[TGSI_FILE_CONSTANT] + 1;
		so->first_immediate = so->first_driver_param + 1;
	} else {
		so->first_immediate = info->file_max[TGSI_FILE_CONSTANT] + 1;
	}
	ctx->immediate_idx = 4 * (ctx->info.file_max[TGSI_FILE_IMMEDIATE] + 1);

	ret = tgsi_parse_init(&ctx->parser, ctx->tokens);
	if (ret != TGSI_PARSE_OK)
		return ret;

	ctx->type = ctx->parser.FullHeader.Processor.Processor;

	return ret;
}

static void
compile_error(struct ir3_compile_context *ctx, const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	_debug_vprintf(format, ap);
	va_end(ap);
	tgsi_dump(ctx->tokens, 0);
	debug_assert(0);
}

#define compile_assert(ctx, cond) do { \
		if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
	} while (0)

static void
compile_free(struct ir3_compile_context *ctx)
{
	if (ctx->free_tokens)
		free((void *)ctx->tokens);
	tgsi_parse_free(&ctx->parser);
}

struct instr_translater {
	void (*fxn)(const struct instr_translater *t,
			struct ir3_compile_context *ctx,
			struct tgsi_full_instruction *inst);
	unsigned tgsi_opc;
	opc_t opc;
	opc_t hopc;    /* opc to use for half_precision mode, if different */
	unsigned arg;
};

static void
instr_finish(struct ir3_compile_context *ctx)
{
	unsigned i;

	if (ctx->atomic)
		return;

	for (i = 0; i < ctx->num_output_updates; i++)
		*(ctx->output_updates[i].instrp) = ctx->output_updates[i].instr;

	ctx->num_output_updates = 0;

	while (ctx->array_dirty) {
		unsigned aid = ffs(ctx->array_dirty) - 1;
		ctx->array[aid].fanin = NULL;
		ctx->array_dirty &= ~(1 << aid);
	}
}
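
/* e.g. for "ADD TEMP[0], TEMP[0].yzwx, CONST[0]" each of the four scalar
 * adds must still read the pre-instruction value of TEMP[0], so the
 * writes queued up in output_updates[] only become visible here, once
 * all the srcs of the current instruction have been processed.
 */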

/* For "atomic" groups of instructions, for example the four scalar
 * instructions to perform a vec4 operation.  Basically this just
 * blocks out handling of output_updates so the next scalar instruction
 * still sees the result from before the start of the atomic group.
 *
 * NOTE: when used properly, this could probably replace get/put_dst()
 * stuff.
 */
static void
instr_atomic_start(struct ir3_compile_context *ctx)
{
	ctx->atomic = true;
}

static void
instr_atomic_end(struct ir3_compile_context *ctx)
{
	ctx->atomic = false;
	instr_finish(ctx);
}

static struct ir3_instruction *
instr_create(struct ir3_compile_context *ctx, int category, opc_t opc)
{
	instr_finish(ctx);
	return (ctx->current_instr = ir3_instr_create(ctx->block, category, opc));
}

static struct ir3_block *
push_block(struct ir3_compile_context *ctx)
{
	struct ir3_block *block;
	unsigned ntmp, nin, nout;

#define SCALAR_REGS(file) (4 * (ctx->info.file_max[TGSI_FILE_ ## file] + 1))

	/* hmm, give ourselves room to create 8 extra temporaries (vec4):
	 */
	ntmp = SCALAR_REGS(TEMPORARY);
	ntmp += 8 * 4;

	nout = SCALAR_REGS(OUTPUT);
	nin  = SCALAR_REGS(INPUT) + SCALAR_REGS(SYSTEM_VALUE);

	/* for outermost block, 'inputs' are the actual shader INPUT
	 * register file.  Reads from INPUT registers always go back to
	 * top block.  For nested blocks, 'inputs' is used to track any
	 * TEMPORARY file register from one of the enclosing blocks that
	 * is read in this block.
	 */
	if (!ctx->block) {
		/* NOTE: fragment shaders actually have two inputs (r0.xy, the
		 * position)
		 */
		if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
			int n = 2;
			if (ctx->info.reads_position)
				n += 4;
			if (ctx->info.uses_frontface)
				n += 4;
			nin = MAX2(n, nin);
			nout += ARRAY_SIZE(ctx->kill);
		}
	} else {
		nin = ntmp;
	}

	block = ir3_block_create(ctx->ir, ntmp, nin, nout);

	if ((ctx->type == TGSI_PROCESSOR_FRAGMENT) && !ctx->block)
		block->noutputs -= ARRAY_SIZE(ctx->kill);

	block->parent = ctx->block;
	ctx->block = block;

	return block;
}

static void
pop_block(struct ir3_compile_context *ctx)
{
	ctx->block = ctx->block->parent;
	compile_assert(ctx, ctx->block);
}

static struct ir3_instruction *
create_output(struct ir3_block *block, struct ir3_instruction *instr,
		unsigned n)
{
	struct ir3_instruction *out;

	out = ir3_instr_create(block, -1, OPC_META_OUTPUT);
	out->inout.block = block;
	ir3_reg_create(out, n, 0);
	if (instr)
		ir3_reg_create(out, 0, IR3_REG_SSA)->instr = instr;

	return out;
}

static struct ir3_instruction *
create_input(struct ir3_block *block, struct ir3_instruction *instr,
		unsigned n)
{
	struct ir3_instruction *in;

	in = ir3_instr_create(block, -1, OPC_META_INPUT);
	in->inout.block = block;
	ir3_reg_create(in, n, 0);
	if (instr)
		ir3_reg_create(in, 0, IR3_REG_SSA)->instr = instr;

	return in;
}

static struct ir3_instruction *
block_input(struct ir3_block *block, unsigned n)
{
	/* references to INPUT register file always go back up to
	 * top level:
	 */
	if (block->parent)
		return block_input(block->parent, n);
	return block->inputs[n];
}

/* return the temporary in scope, creating meta-input nodes as needed
 * to track block inputs
 */
static struct ir3_instruction *
block_temporary(struct ir3_block *block, unsigned n)
{
	/* references to TEMPORARY register file, find the nearest
	 * enclosing block which has already assigned this temporary,
	 * creating meta-input instructions along the way to keep
	 * track of block inputs
	 */
	if (block->parent && !block->temporaries[n]) {
		/* if already have input for this block, reuse: */
		if (!block->inputs[n])
			block->inputs[n] = block_temporary(block->parent, n);

		/* and create new input to return: */
		return create_input(block, block->inputs[n], n);
	}
	return block->temporaries[n];
}
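
/* e.g. a read of TEMP[2].x inside a nested loop body whose value was
 * last written two blocks up: each intervening block gets a meta-input
 * pointing at the enclosing block's value, so the dataflow into the
 * inner block is explicit for the later passes.
 */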

static struct ir3_instruction *
create_immed(struct ir3_compile_context *ctx, float val)
{
	/* NOTE: *don't* use instr_create() here!
	 */
	struct ir3_instruction *instr;
	instr = ir3_instr_create(ctx->block, 1, 0);
	instr->cat1.src_type = get_ftype(ctx);
	instr->cat1.dst_type = get_ftype(ctx);
	ir3_reg_create(instr, 0, 0);
	ir3_reg_create(instr, 0, IR3_REG_IMMED)->fim_val = val;
	return instr;
}

static void
ssa_instr_set(struct ir3_compile_context *ctx, unsigned file, unsigned n,
		struct ir3_instruction *instr)
{
	struct ir3_block *block = ctx->block;
	unsigned idx = ctx->num_output_updates;

	compile_assert(ctx, idx < ARRAY_SIZE(ctx->output_updates));

	/* NOTE: defer update of temporaries[idx] or output[idx]
	 * until instr_finish(), so that if the current instruction
	 * reads the same TEMP/OUT[] it gets the old value:
	 *
	 * bleh.. this might be a bit easier to just figure out
	 * in instr_finish().  But at that point we've already
	 * lost information about OUTPUT vs TEMPORARY register
	 * file..
	 */

	switch (file) {
	case TGSI_FILE_OUTPUT:
		compile_assert(ctx, n < block->noutputs);
		ctx->output_updates[idx].instrp = &block->outputs[n];
		ctx->output_updates[idx].instr = instr;
		ctx->num_output_updates++;
		break;
	case TGSI_FILE_TEMPORARY:
		compile_assert(ctx, n < block->ntemporaries);
		ctx->output_updates[idx].instrp = &block->temporaries[n];
		ctx->output_updates[idx].instr = instr;
		ctx->num_output_updates++;
		break;
	case TGSI_FILE_ADDRESS:
		compile_assert(ctx, n < 1);
		ctx->output_updates[idx].instrp = &block->address;
		ctx->output_updates[idx].instr = instr;
		ctx->num_output_updates++;
		break;
	}
}

static struct ir3_instruction *
ssa_instr_get(struct ir3_compile_context *ctx, unsigned file, unsigned n)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *instr = NULL;

	switch (file) {
	case TGSI_FILE_INPUT:
		instr = block_input(ctx->block, n);
		break;
	case TGSI_FILE_OUTPUT:
		/* really this should just happen in case of 'MOV_SAT OUT[n], ..',
		 * for the following clamp instructions:
		 */
		instr = block->outputs[n];
		/* we don't have to worry about read from an OUTPUT that was
		 * assigned outside of the current block, because the _SAT
		 * clamp instructions will always be in the same block as
		 * the original instruction which wrote the OUTPUT
		 */
		compile_assert(ctx, instr);
		break;
	case TGSI_FILE_TEMPORARY:
		instr = block_temporary(ctx->block, n);
		if (!instr) {
			/* this can happen when registers (or components of a TGSI
			 * register) are used as src before they have been assigned
			 * (undefined contents).  To avoid confusing the rest of the
			 * compiler, and to generally keep things peachy, substitute
			 * an instruction that sets the src to 0.0.  Or to keep
			 * things undefined, I could plug in a random number? :-P
			 *
			 * NOTE: *don't* use instr_create() here!
			 */
			instr = create_immed(ctx, 0.0);
			/* no need to recreate the immed for every access: */
			block->temporaries[n] = instr;
		}
		break;
	case TGSI_FILE_SYSTEM_VALUE:
		switch (ctx->sysval_semantics[n >> 2]) {
		case TGSI_SEMANTIC_VERTEXID_NOBASE:
			instr = ctx->vertex_id;
			break;
		case TGSI_SEMANTIC_BASEVERTEX:
			instr = ctx->basevertex;
			break;
		case TGSI_SEMANTIC_INSTANCEID:
			instr = ctx->instance_id;
			break;
		}
		break;
	}

	return instr;
}

static int dst_array_id(struct ir3_compile_context *ctx,
		const struct tgsi_dst_register *dst)
{
	// XXX complete hack to recover tgsi_full_dst_register...
	// nothing that isn't wrapped in a tgsi_full_dst_register
	// should be indirect
	const struct tgsi_full_dst_register *fdst = (const void *)dst;
	return fdst->Indirect.ArrayID + ctx->array_offsets[dst->File];
}

static int src_array_id(struct ir3_compile_context *ctx,
		const struct tgsi_src_register *src)
{
	// XXX complete hack to recover tgsi_full_src_register...
	// nothing that isn't wrapped in a tgsi_full_src_register
	// should be indirect
	const struct tgsi_full_src_register *fsrc = (const void *)src;
	debug_assert(src->File != TGSI_FILE_CONSTANT);
	return fsrc->Indirect.ArrayID + ctx->array_offsets[src->File];
}

static struct ir3_instruction *
array_fanin(struct ir3_compile_context *ctx, unsigned aid, unsigned file)
{
	struct ir3_instruction *instr;

	if (ctx->array[aid].fanin) {
		instr = ctx->array[aid].fanin;
	} else {
		unsigned first = ctx->array[aid].first;
		unsigned last  = ctx->array[aid].last;
		unsigned i, j;

		instr = ir3_instr_create2(ctx->block, -1, OPC_META_FI,
				1 + (4 * (last + 1 - first)));
		ir3_reg_create(instr, 0, 0);
		for (i = first; i <= last; i++) {
			for (j = 0; j < 4; j++) {
				unsigned n = regid(i, j);
				ir3_reg_create(instr, 0, IR3_REG_SSA)->instr =
						ssa_instr_get(ctx, file, n);
			}
		}
		ctx->array[aid].fanin = instr;
		ctx->array_dirty |= (1 << aid);
	}

	return instr;
}
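
/* e.g. an array spanning TEMP[4]..TEMP[7] gets a single fanin collecting
 * all 16 scalar components (four vec4s), so an indirectly addressed src
 * depends on every element it could possibly read.
 */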

static void
ssa_dst(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_dst_register *dst, unsigned chan)
{
	if (dst->Indirect) {
		struct ir3_register *reg = instr->regs[0];
		unsigned i, aid = dst_array_id(ctx, dst);
		unsigned first = ctx->array[aid].first;
		unsigned last  = ctx->array[aid].last;
		unsigned off   = dst->Index - first; /* vec4 offset */

		reg->size = 4 * (1 + last - first);
		reg->offset = regid(off, chan);

		instr->fanin = array_fanin(ctx, aid, dst->File);

		/* annotate with the array-id, to help out the register-
		 * assignment stage.  At least for the case of indirect
		 * writes, we should capture enough dependencies to
		 * preserve the order of reads/writes of the array, so
		 * the multiple "names" for the array should end up all
		 * assigned to the same registers.
		 */
		instr->fanin->fi.aid = aid;

		/* Since we are scalarizing vec4 tgsi instructions/regs, we
		 * run into a slight complication here.  To do the naive thing
		 * and setup a fanout for each scalar array element would end
		 * up with the result that the instructions generated for each
		 * component of the vec4 would end up clobbering each other.
		 * So we take advantage here of knowing that the array index
		 * (after the shl.b) will be a multiple of four, and only set
		 * every fourth scalar component in the array.  See also
		 * fixup_ssa_dst_array()
		 */
		for (i = first; i <= last; i++) {
			struct ir3_instruction *split;
			unsigned n = regid(i, chan);
			int off = (4 * (i - first)) + chan;

			if (is_meta(instr) && (instr->opc == OPC_META_FO))
				off -= instr->fo.off;

			split = ir3_instr_create(ctx->block, -1, OPC_META_FO);
			split->fo.off = off;
			ir3_reg_create(split, 0, 0);
			ir3_reg_create(split, 0, IR3_REG_SSA)->instr = instr;

			ssa_instr_set(ctx, dst->File, n, split);
		}
	} else {
		/* normal case (not relative addressed GPR) */
		ssa_instr_set(ctx, dst->File, regid(dst->Index, chan), instr);
	}
}
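
/* e.g. an indirect write to TEMP[n].y with the array spanning two vec4s
 * creates one FO split per vec4 (offsets 1 and 5) rather than one per
 * scalar, matching the multiple-of-four (post shl.b) address.
 */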

static void
ssa_src(struct ir3_compile_context *ctx, struct ir3_register *reg,
		const struct tgsi_src_register *src, unsigned chan)
{
	struct ir3_instruction *instr;

	if (src->Indirect && (src->File != TGSI_FILE_CONSTANT)) {
		/* for relative addressing of gpr's (due to register assignment)
		 * we must generate a fanin instruction to collect all possible
		 * array elements that the instruction could address together:
		 */
		unsigned aid   = src_array_id(ctx, src);
		unsigned first = ctx->array[aid].first;
		unsigned last  = ctx->array[aid].last;
		unsigned off   = src->Index - first; /* vec4 offset */

		reg->size = 4 * (1 + last - first);
		reg->offset = regid(off, chan);

		instr = array_fanin(ctx, aid, src->File);
	} else {
		/* normal case (not relative addressed GPR) */
		instr = ssa_instr_get(ctx, src->File, regid(src->Index, chan));
	}

	if (instr) {
		reg->flags |= IR3_REG_SSA;
		reg->instr = instr;
	} else if (reg->flags & IR3_REG_SSA) {
		/* special hack for trans_samp() which calls ssa_src() directly
		 * to build up the collect (fanin) for const src.. (so SSA flag
		 * set but no src instr... it basically gets lucky because we
		 * default to 0.0 for "undefined" src instructions, which is
		 * what it wants.  We probably need to give it a better way to
		 * do this, but for now this hack:
		 */
		reg->instr = create_immed(ctx, 0.0);
	}
}

static struct ir3_register *
add_dst_reg_wrmask(struct ir3_compile_context *ctx,
		struct ir3_instruction *instr, const struct tgsi_dst_register *dst,
		unsigned chan, unsigned wrmask)
{
	unsigned flags = 0, num = 0;
	struct ir3_register *reg;

	switch (dst->File) {
	case TGSI_FILE_OUTPUT:
	case TGSI_FILE_TEMPORARY:
		/* uses SSA */
		break;
	case TGSI_FILE_ADDRESS:
		flags |= IR3_REG_ADDR;
		/* uses SSA */
		break;
	default:
		compile_error(ctx, "unsupported dst register file: %s\n",
				tgsi_file_name(dst->File));
		break;
	}

	if (dst->Indirect) {
		flags |= IR3_REG_RELATIV;

		/* shouldn't happen, and we can't cope with it below: */
		compile_assert(ctx, wrmask == 0x1);

		compile_assert(ctx, ctx->block->address);
		if (instr->address)
			compile_assert(ctx, ctx->block->address == instr->address);

		instr->address = ctx->block->address;
		array_insert(ctx->ir->indirects, instr);
	}

	reg = ir3_reg_create(instr, regid(num, chan), flags);
	reg->wrmask = wrmask;

	if (wrmask == 0x1) {
		/* normal case */
		ssa_dst(ctx, instr, dst, chan);
	} else if ((dst->File == TGSI_FILE_TEMPORARY) ||
			(dst->File == TGSI_FILE_OUTPUT) ||
			(dst->File == TGSI_FILE_ADDRESS)) {
		struct ir3_instruction *prev = NULL;
		unsigned i;

		compile_assert(ctx, !dst->Indirect);

		/* if instruction writes multiple, we need to create
		 * some place-holder to collect the registers:
		 */
		for (i = 0; i < 4; i++) {
			/* NOTE: slightly ugly that we setup neighbor ptrs
			 * for FO here, but handle FI in CP pass.. we should
			 * probably just always setup neighbor ptrs in the
			 * frontend?
			 */
			struct ir3_instruction *split =
					ir3_instr_create(ctx->block, -1, OPC_META_FO);
			split->fo.off = i;
			/* unused dst reg: */
			/* NOTE: set SSA flag on dst here, because unused FO's
			 * which don't get scheduled will end up not in the
			 * instruction list when RA sets SSA flag on each dst.
			 * Slight hack.  We really should set SSA flag on
			 * every dst register in the frontend.
			 */
			ir3_reg_create(split, 0, IR3_REG_SSA);
			/* and src reg used to hold original instr */
			ir3_reg_create(split, 0, IR3_REG_SSA)->instr = instr;
			if (prev) {
				split->cp.left = prev;
				split->cp.left_cnt++;
				prev->cp.right = split;
				prev->cp.right_cnt++;
			}
			if ((wrmask & (1 << i)) && !ctx->atomic)
				ssa_dst(ctx, split, dst, chan+i);
			prev = split;
		}
	}

	return reg;
}
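
/* e.g. a sam writing dst.xyz (wrmask=0x7) hangs a chain of FO splits off
 * the instruction, linked as cp neighbors so RA can place the components
 * in consecutive regs; only the components in wrmask become visible as
 * new SSA values.
 */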

static struct ir3_register *
add_dst_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_dst_register *dst, unsigned chan)
{
	return add_dst_reg_wrmask(ctx, instr, dst, chan, 0x1);
}

static struct ir3_register *
add_src_reg_wrmask(struct ir3_compile_context *ctx,
		struct ir3_instruction *instr, const struct tgsi_src_register *src,
		unsigned chan, unsigned wrmask)
{
	unsigned flags = 0, num = 0;
	struct ir3_register *reg;

	switch (src->File) {
	case TGSI_FILE_IMMEDIATE:
		/* TODO if possible, use actual immediate instead of const.. but
		 * TGSI has vec4 immediates, we can only embed scalar (of limited
		 * size, depending on instruction..)
		 */
		flags |= IR3_REG_CONST;
		num = src->Index + ctx->so->first_immediate;
		break;
	case TGSI_FILE_CONSTANT:
		flags |= IR3_REG_CONST;
		num = src->Index;
		break;
	case TGSI_FILE_OUTPUT:
		/* NOTE: we should only end up w/ OUTPUT file for things like
		 * clamp()'ing saturated dst instructions
		 */
	case TGSI_FILE_INPUT:
	case TGSI_FILE_TEMPORARY:
	case TGSI_FILE_SYSTEM_VALUE:
		/* uses SSA */
		break;
	default:
		compile_error(ctx, "unsupported src register file: %s\n",
				tgsi_file_name(src->File));
		break;
	}

	/* We seem to have 8 bits (6.2) for dst register always, so I think
	 * it is safe to assume GPR cannot be >=64
	 *
	 * cat3 instructions only have 8 bits for src2, but cannot take a
	 * const for src2
	 *
	 * cat5 and cat6 in some cases only has 8 bits, but cannot take a
	 * const for any src.
	 *
	 * Other than that we seem to have 12 bits to encode const src,
	 * except for cat1 which may only have 11 bits (but that seems like
	 * a bug)
	 */
	if (flags & IR3_REG_CONST)
		compile_assert(ctx, src->Index < (1 << 9));
	else
		compile_assert(ctx, src->Index < (1 << 6));

	if (src->Absolute)
		flags |= IR3_REG_ABS;
	if (src->Negate)
		flags |= IR3_REG_NEGATE;

	if (src->Indirect) {
		flags |= IR3_REG_RELATIV;

		/* shouldn't happen, and we can't cope with it below: */
		compile_assert(ctx, wrmask == 0x1);

		compile_assert(ctx, ctx->block->address);
		if (instr->address)
			compile_assert(ctx, ctx->block->address == instr->address);

		instr->address = ctx->block->address;
		array_insert(ctx->ir->indirects, instr);
	}

	reg = ir3_reg_create(instr, regid(num, chan), flags);
	reg->wrmask = wrmask;

	if (wrmask == 0x1) {
		/* normal case */
		ssa_src(ctx, reg, src, chan);
	} else if ((src->File == TGSI_FILE_TEMPORARY) ||
			(src->File == TGSI_FILE_OUTPUT) ||
			(src->File == TGSI_FILE_INPUT)) {
		struct ir3_instruction *collect;
		unsigned i;

		compile_assert(ctx, !src->Indirect);

		/* if instruction reads multiple, we need to create
		 * some place-holder to collect the registers:
		 */
		collect = ir3_instr_create(ctx->block, -1, OPC_META_FI);
		ir3_reg_create(collect, 0, 0); /* unused dst reg */

		for (i = 0; i < 4; i++) {
			if (wrmask & (1 << i)) {
				/* and src reg used to point to the original instr */
				ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
						src, chan + i);
			} else if (wrmask & ~((1 << i) - 1)) {
				/* if any remaining components, then dummy
				 * placeholder src reg to fill in the blanks:
				 */
				ir3_reg_create(collect, 0, 0);
			}
		}

		reg->flags |= IR3_REG_SSA;
		reg->instr = collect;
	}

	return reg;
}

static struct ir3_register *
add_src_reg(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		const struct tgsi_src_register *src, unsigned chan)
{
	return add_src_reg_wrmask(ctx, instr, src, chan, 0x1);
}

static void
src_from_dst(struct tgsi_src_register *src, struct tgsi_dst_register *dst)
{
	src->File = dst->File;
	src->Indirect = dst->Indirect;
	src->Dimension = dst->Dimension;
	src->Index = dst->Index;
	src->Absolute = 0;
	src->Negate = 0;
	src->SwizzleX = TGSI_SWIZZLE_X;
	src->SwizzleY = TGSI_SWIZZLE_Y;
	src->SwizzleZ = TGSI_SWIZZLE_Z;
	src->SwizzleW = TGSI_SWIZZLE_W;
}

/* Get internal-temp src/dst to use for a sequence of instructions
 * generated by a single TGSI op.
 */
static struct tgsi_src_register *
get_internal_temp(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *tmp_dst)
{
	struct tgsi_src_register *tmp_src;
	int n;

	tmp_dst->File = TGSI_FILE_TEMPORARY;
	tmp_dst->WriteMask = TGSI_WRITEMASK_XYZW;
	tmp_dst->Indirect = 0;
	tmp_dst->Dimension = 0;

	/* assign next temporary: */
	n = ctx->num_internal_temps++;
	compile_assert(ctx, n < ARRAY_SIZE(ctx->internal_temps));
	tmp_src = &ctx->internal_temps[n];

	tmp_dst->Index = ctx->info.file_max[TGSI_FILE_TEMPORARY] + n + 1;

	src_from_dst(tmp_src, tmp_dst);

	return tmp_src;
}

static inline bool
is_const(struct tgsi_src_register *src)
{
	return (src->File == TGSI_FILE_CONSTANT) ||
			(src->File == TGSI_FILE_IMMEDIATE);
}

static inline bool
is_relative(struct tgsi_src_register *src)
{
	return src->Indirect;
}

static inline bool
is_rel_or_const(struct tgsi_src_register *src)
{
	return is_relative(src) || is_const(src);
}

static type_t
get_ftype(struct ir3_compile_context *ctx)
{
	return TYPE_F32;
}

static type_t
get_utype(struct ir3_compile_context *ctx)
{
	return TYPE_U32;
}

static type_t
get_stype(struct ir3_compile_context *ctx)
{
	return TYPE_S32;
}

static unsigned
src_swiz(struct tgsi_src_register *src, int chan)
{
	switch (chan) {
	case 0: return src->SwizzleX;
	case 1: return src->SwizzleY;
	case 2: return src->SwizzleZ;
	case 3: return src->SwizzleW;
	}
	assert(0);
	return 0;
}

/* for instructions that cannot take a const register as src, if needed
 * generate a move to temporary gpr:
 */
static struct tgsi_src_register *
get_unconst(struct ir3_compile_context *ctx, struct tgsi_src_register *src)
{
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;

	compile_assert(ctx, is_rel_or_const(src));

	tmp_src = get_internal_temp(ctx, &tmp_dst);

	create_mov(ctx, &tmp_dst, src);

	return tmp_src;
}

static void
get_immediate(struct ir3_compile_context *ctx,
		struct tgsi_src_register *reg, uint32_t val)
{
	unsigned neg, swiz, idx, i;
	/* actually maps 1:1 currently.. not sure if that is safe to rely on: */
	static const unsigned swiz2tgsi[] = {
			TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
	};

	for (i = 0; i < ctx->immediate_idx; i++) {
		swiz = i % 4;
		idx = i / 4;

		if (ctx->so->immediates[idx].val[swiz] == val) {
			neg = 0;
			break;
		}

		if (ctx->so->immediates[idx].val[swiz] == -val) {
			neg = 1;
			break;
		}
	}

	if (i == ctx->immediate_idx) {
		/* need to generate a new immediate: */
		swiz = i % 4;
		idx = i / 4;
		neg = 0;
		ctx->so->immediates[idx].val[swiz] = val;
		ctx->so->immediates_count = idx + 1;
		ctx->immediate_idx++;
	}

	reg->File = TGSI_FILE_IMMEDIATE;
	reg->Indirect = 0;
	reg->Dimension = 0;
	reg->Index = idx;
	reg->Absolute = 0;
	reg->Negate = neg;
	reg->SwizzleX = swiz2tgsi[swiz];
	reg->SwizzleY = swiz2tgsi[swiz];
	reg->SwizzleZ = swiz2tgsi[swiz];
	reg->SwizzleW = swiz2tgsi[swiz];
}
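
/* e.g. with no TGSI-declared immediates, requesting fui(1.0) and then
 * fui(2.0) packs them as immediates[0].val[0] and .val[1]; a later
 * request for fui(1.0) reuses slot 0 via the swizzle instead of burning
 * a new slot.
 */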

static void
create_mov(struct ir3_compile_context *ctx, struct tgsi_dst_register *dst,
		struct tgsi_src_register *src)
{
	type_t type_mov = get_ftype(ctx);
	unsigned i;

	for (i = 0; i < 4; i++) {
		/* move to destination: */
		if (dst->WriteMask & (1 << i)) {
			struct ir3_instruction *instr;

			if (src->Absolute || src->Negate) {
				/* can't have abs or neg on a mov instr, so use
				 * absneg.f instead to handle these cases:
				 */
				instr = instr_create(ctx, 2, OPC_ABSNEG_F);
			} else {
				instr = instr_create(ctx, 1, 0);
				instr->cat1.src_type = type_mov;
				instr->cat1.dst_type = type_mov;
			}

			add_dst_reg(ctx, instr, dst, i);
			add_src_reg(ctx, instr, src, src_swiz(src, i));
		}
	}
}

static void
create_clamp(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *dst, struct tgsi_src_register *val,
		struct tgsi_src_register *minval, struct tgsi_src_register *maxval)
{
	struct ir3_instruction *instr;

	instr = instr_create(ctx, 2, OPC_MAX_F);
	vectorize(ctx, instr, dst, 2, val, 0, minval, 0);

	instr = instr_create(ctx, 2, OPC_MIN_F);
	vectorize(ctx, instr, dst, 2, val, 0, maxval, 0);
}

static void
create_clamp_imm(struct ir3_compile_context *ctx,
		struct tgsi_dst_register *dst,
		uint32_t minval, uint32_t maxval)
{
	struct tgsi_src_register minconst, maxconst;
	struct tgsi_src_register src;

	src_from_dst(&src, dst);

	get_immediate(ctx, &minconst, minval);
	get_immediate(ctx, &maxconst, maxval);

	create_clamp(ctx, dst, &src, &minconst, &maxconst);
}

static struct tgsi_dst_register *
get_dst(struct ir3_compile_context *ctx, struct tgsi_full_instruction *inst)
{
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	unsigned i;

	compile_assert(ctx, !ctx->using_tmp_dst);
	ctx->using_tmp_dst = true;

	for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
		struct tgsi_src_register *src = &inst->Src[i].Register;
		if ((src->File == dst->File) && (src->Index == dst->Index)) {
			if ((dst->WriteMask == TGSI_WRITEMASK_XYZW) &&
					(src->SwizzleX == TGSI_SWIZZLE_X) &&
					(src->SwizzleY == TGSI_SWIZZLE_Y) &&
					(src->SwizzleZ == TGSI_SWIZZLE_Z) &&
					(src->SwizzleW == TGSI_SWIZZLE_W))
				continue;
			ctx->tmp_src = get_internal_temp(ctx, &ctx->tmp_dst);
			ctx->tmp_dst.WriteMask = dst->WriteMask;
			dst = &ctx->tmp_dst;
			break;
		}
	}
	return dst;
}

static void
put_dst(struct ir3_compile_context *ctx, struct tgsi_full_instruction *inst,
		struct tgsi_dst_register *dst)
{
	compile_assert(ctx, ctx->using_tmp_dst);
	ctx->using_tmp_dst = false;

	/* if necessary, add mov back into original dst: */
	if (dst != &inst->Dst[0].Register) {
		create_mov(ctx, &inst->Dst[0].Register, ctx->tmp_src);
	}
}

/* helper to generate the necessary repeat and/or additional instructions
 * to turn a scalar instruction into a vector operation:
 */
static void
vectorize(struct ir3_compile_context *ctx, struct ir3_instruction *instr,
		struct tgsi_dst_register *dst, int nsrcs, ...)
{
	va_list ap;
	int i, j, n = 0;

	instr_atomic_start(ctx);

	for (i = 0; i < 4; i++) {
		if (dst->WriteMask & (1 << i)) {
			struct ir3_instruction *cur;

			if (n++ == 0) {
				cur = instr;
			} else {
				cur = instr_create(ctx, instr->category, instr->opc);
				memcpy(cur->info, instr->info, sizeof(cur->info));
			}

			add_dst_reg(ctx, cur, dst, i);

			va_start(ap, nsrcs);
			for (j = 0; j < nsrcs; j++) {
				struct tgsi_src_register *src =
						va_arg(ap, struct tgsi_src_register *);
				unsigned flags = va_arg(ap, unsigned);
				struct ir3_register *reg;
				if (flags & IR3_REG_IMMED) {
					reg = ir3_reg_create(cur, 0, IR3_REG_IMMED);
					/* this is an ugly cast.. should have put flags first! */
					reg->iim_val = *(int *)&src;
				} else {
					reg = add_src_reg(ctx, cur, src, src_swiz(src, i));
				}
				reg->flags |= flags & ~IR3_REG_NEGATE;
				if (flags & IR3_REG_NEGATE)
					reg->flags ^= IR3_REG_NEGATE;
			}
			va_end(ap);
		}
	}

	instr_atomic_end(ctx);
}
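
/* e.g. "MUL dst.xz, a, b" expands to two scalar mul.f instructions (one
 * for .x, one for .z) inside an atomic group, so the second one still
 * sees pre-instruction values of any register it shares with dst.
 */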

/*
 * Handlers for TGSI instructions which do not have a 1:1 mapping to
 * native instructions:
 */

static void
trans_clamp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct tgsi_dst_register *dst = get_dst(ctx, inst);
	struct tgsi_src_register *src0 = &inst->Src[0].Register;
	struct tgsi_src_register *src1 = &inst->Src[1].Register;
	struct tgsi_src_register *src2 = &inst->Src[2].Register;

	create_clamp(ctx, dst, src0, src1, src2);

	put_dst(ctx, inst, dst);
}

/* ARL(x) = x, but mova from hrN.x to a0.. */
static void
trans_arl(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *src = &inst->Src[0].Register;
	unsigned chan = src->SwizzleX;

	compile_assert(ctx, dst->File == TGSI_FILE_ADDRESS);

	/* NOTE: we allocate a temporary from a flat register
	 * namespace (ignoring half vs full).  It turns out
	 * not to really matter since registers get reassigned
	 * later in ir3_ra which (hopefully!) can deal a bit
	 * better with mixed half and full precision.
	 */
	tmp_src = get_internal_temp(ctx, &tmp_dst);

	/* cov.{u,f}{32,16}s16 Rtmp, Rsrc */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = (t->tgsi_opc == TGSI_OPCODE_ARL) ?
			get_ftype(ctx) : get_utype(ctx);
	instr->cat1.dst_type = TYPE_S16;
	add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, src, chan);

	/* shl.b Rtmp, Rtmp, 2 */
	instr = instr_create(ctx, 2, OPC_SHL_B);
	add_dst_reg(ctx, instr, &tmp_dst, chan)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;
	ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 2;

	/* mova a0, Rtmp */
	instr = instr_create(ctx, 1, 0);
	instr->cat1.src_type = TYPE_S16;
	instr->cat1.dst_type = TYPE_S16;
	add_dst_reg(ctx, instr, dst, 0)->flags |= IR3_REG_HALF;
	add_src_reg(ctx, instr, tmp_src, chan)->flags |= IR3_REG_HALF;
}
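
/* e.g. "ARL ADDR[0].x, TEMP[0].x" thus becomes a cov.f32s16, a shl.b by
 * 2 (scaling the vec4 index up to a scalar reg offset, see ssa_dst()),
 * and a final mova into a0.x.
 */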

/*
 * texture fetch/sample instructions:
 */

struct tex_info {
	int8_t order[4];
	int8_t args;
	unsigned src_wrmask, flags;
};

struct target_info {
	uint8_t dims;
	uint8_t cube;
	uint8_t array;
	uint8_t shadow;
};

static const struct target_info tex_targets[] = {
	[TGSI_TEXTURE_1D]               = { 1, 0, 0, 0 },
	[TGSI_TEXTURE_2D]               = { 2, 0, 0, 0 },
	[TGSI_TEXTURE_3D]               = { 3, 0, 0, 0 },
	[TGSI_TEXTURE_CUBE]             = { 3, 1, 0, 0 },
	[TGSI_TEXTURE_RECT]             = { 2, 0, 0, 0 },
	[TGSI_TEXTURE_SHADOW1D]         = { 1, 0, 0, 1 },
	[TGSI_TEXTURE_SHADOW2D]         = { 2, 0, 0, 1 },
	[TGSI_TEXTURE_SHADOWRECT]       = { 2, 0, 0, 1 },
	[TGSI_TEXTURE_1D_ARRAY]         = { 1, 0, 1, 0 },
	[TGSI_TEXTURE_2D_ARRAY]         = { 2, 0, 1, 0 },
	[TGSI_TEXTURE_SHADOW1D_ARRAY]   = { 1, 0, 1, 1 },
	[TGSI_TEXTURE_SHADOW2D_ARRAY]   = { 2, 0, 1, 1 },
	[TGSI_TEXTURE_SHADOWCUBE]       = { 3, 1, 0, 1 },
	[TGSI_TEXTURE_2D_MSAA]          = { 2, 0, 0, 0 },
	[TGSI_TEXTURE_2D_ARRAY_MSAA]    = { 2, 0, 1, 0 },
	[TGSI_TEXTURE_CUBE_ARRAY]       = { 3, 1, 1, 0 },
	[TGSI_TEXTURE_SHADOWCUBE_ARRAY] = { 3, 1, 1, 1 },
};

static void
fill_tex_info(struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst,
		struct tex_info *info)
{
	const struct target_info *tgt = &tex_targets[inst->Texture.Texture];

	if (tgt->dims == 3)
		info->flags |= IR3_INSTR_3D;
	if (tgt->array)
		info->flags |= IR3_INSTR_A;
	if (tgt->shadow)
		info->flags |= IR3_INSTR_S;

	switch (inst->Instruction.Opcode) {
	case TGSI_OPCODE_TXB:
	case TGSI_OPCODE_TXB2:
	case TGSI_OPCODE_TXL:
	case TGSI_OPCODE_TXF:
		info->args = 2;
		break;
	case TGSI_OPCODE_TXP:
		info->flags |= IR3_INSTR_P;
		/* fallthrough */
	case TGSI_OPCODE_TEX:
	case TGSI_OPCODE_TXD:
		info->args = 1;
		break;
	}

	/*
	 * lay out the first argument in the proper order:
	 *  - actual coordinates first
	 *  - shadow reference
	 *  - array index
	 *  - projection w
	 *
	 * bias/lod go into the second arg
	 */
	int arg, pos = 0;
	for (arg = 0; arg < tgt->dims; arg++)
		info->order[arg] = pos++;
	if (tgt->dims == 1)
		info->order[pos++] = -1;
	if (tgt->shadow)
		info->order[pos++] = MAX2(arg + tgt->array, 2);
	if (tgt->array)
		info->order[pos++] = arg++;
	if (info->flags & IR3_INSTR_P)
		info->order[pos++] = 3;

	info->src_wrmask = (1 << pos) - 1;

	for (; pos < 4; pos++)
		info->order[pos] = -1;

	assert(pos <= 4);
}
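
/* e.g. TEX with TGSI_TEXTURE_2D_ARRAY: the coords fill order[0..1], then
 * the array index lands third, giving order = {0, 1, 2, -1} and
 * src_wrmask = 0x7 (xyz).
 */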

static bool check_swiz(struct tgsi_src_register *src, const int8_t order[4])
{
	unsigned i;
	for (i = 1; (i < 4) && order[i] >= 0; i++)
		if (src_swiz(src, i) != (src_swiz(src, 0) + order[i]))
			return false;
	return true;
}

static bool is_1d(unsigned tex)
{
	return tex_targets[tex].dims == 1;
}

static struct tgsi_src_register *
get_tex_coord(struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst,
		const struct tex_info *tinf)
{
	struct tgsi_src_register *coord = &inst->Src[0].Register;
	struct ir3_instruction *instr;
	unsigned tex = inst->Texture.Texture;
	struct tgsi_dst_register tmp_dst;
	struct tgsi_src_register *tmp_src;
	type_t type_mov = get_ftype(ctx);
	unsigned j;

	/* need to move things around: */
	tmp_src = get_internal_temp(ctx, &tmp_dst);

	for (j = 0; j < 4; j++) {
		if (tinf->order[j] < 0)
			continue;
		instr = instr_create(ctx, 1, 0);  /* mov */
		instr->cat1.src_type = type_mov;
		instr->cat1.dst_type = type_mov;
		add_dst_reg(ctx, instr, &tmp_dst, j);
		add_src_reg(ctx, instr, coord,
				src_swiz(coord, tinf->order[j]));
	}

	/* fix up .y coord: */
	if (is_1d(tex)) {
		struct ir3_register *imm;
		instr = instr_create(ctx, 1, 0);  /* mov */
		instr->cat1.src_type = type_mov;
		instr->cat1.dst_type = type_mov;
		add_dst_reg(ctx, instr, &tmp_dst, 1);  /* .y */
		imm = ir3_reg_create(instr, 0, IR3_REG_IMMED);
		if (inst->Instruction.Opcode == TGSI_OPCODE_TXF)
			imm->iim_val = 0;
		else
			imm->fim_val = 0.5;
	}

	return tmp_src;
}

static void
trans_samp(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr, *collect;
	struct ir3_register *reg;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *orig, *coord, *samp, *offset, *dpdx, *dpdy;
	struct tgsi_src_register zero;
	const struct target_info *tgt = &tex_targets[inst->Texture.Texture];
	struct tex_info tinf;
	int i;

	memset(&tinf, 0, sizeof(tinf));
	fill_tex_info(ctx, inst, &tinf);
	coord = get_tex_coord(ctx, inst, &tinf);
	get_immediate(ctx, &zero, 0);

	switch (inst->Instruction.Opcode) {
	case TGSI_OPCODE_TXB2:
		orig = &inst->Src[1].Register;
		samp = &inst->Src[2].Register;
		break;
	case TGSI_OPCODE_TXD:
		orig = &inst->Src[0].Register;
		dpdx = &inst->Src[1].Register;
		dpdy = &inst->Src[2].Register;
		samp = &inst->Src[3].Register;
		if (is_rel_or_const(dpdx))
			dpdx = get_unconst(ctx, dpdx);
		if (is_rel_or_const(dpdy))
			dpdy = get_unconst(ctx, dpdy);
		break;
	default:
		orig = &inst->Src[0].Register;
		samp = &inst->Src[1].Register;
		break;
	}
	if (tinf.args > 1 && is_rel_or_const(orig))
		orig = get_unconst(ctx, orig);

	/* scale up integer coords for TXF based on the LOD */
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
		struct tgsi_dst_register tmp_dst;
		struct tgsi_src_register *tmp_src;
		type_t type_mov = get_utype(ctx);

		tmp_src = get_internal_temp(ctx, &tmp_dst);
		for (i = 0; i < tgt->dims; i++) {
			instr = instr_create(ctx, 2, OPC_SHL_B);
			add_dst_reg(ctx, instr, &tmp_dst, i);
			add_src_reg(ctx, instr, coord, src_swiz(coord, i));
			add_src_reg(ctx, instr, orig, orig->SwizzleW);
		}
		if (tgt->dims < 2) {
			instr = instr_create(ctx, 1, 0);
			instr->cat1.src_type = type_mov;
			instr->cat1.dst_type = type_mov;
			add_dst_reg(ctx, instr, &tmp_dst, i);
			add_src_reg(ctx, instr, &zero, 0);
			i++;
		}
		if (tgt->array) {
			instr = instr_create(ctx, 1, 0);
			instr->cat1.src_type = type_mov;
			instr->cat1.dst_type = type_mov;
			add_dst_reg(ctx, instr, &tmp_dst, i);
			add_src_reg(ctx, instr, coord, src_swiz(coord, i));
		}
		coord = tmp_src;
	}

	if (inst->Texture.NumOffsets) {
		struct tgsi_texture_offset *tex_offset = &inst->TexOffsets[0];
		struct tgsi_src_register offset_src = {0};

		offset_src.File = tex_offset->File;
		offset_src.Index = tex_offset->Index;
		offset_src.SwizzleX = tex_offset->SwizzleX;
		offset_src.SwizzleY = tex_offset->SwizzleY;
		offset_src.SwizzleZ = tex_offset->SwizzleZ;
		offset = get_unconst(ctx, &offset_src);
		tinf.flags |= IR3_INSTR_O;
	}

	instr = instr_create(ctx, 5, t->opc);
	if (ctx->integer_s & (1 << samp->Index))
		instr->cat5.type = get_utype(ctx);
	else
		instr->cat5.type = get_ftype(ctx);
	instr->cat5.samp = samp->Index;
	instr->cat5.tex = samp->Index;
	instr->flags |= tinf.flags;

	add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask);

	reg = ir3_reg_create(instr, 0, IR3_REG_SSA);

	collect = ir3_instr_create2(ctx->block, -1, OPC_META_FI, 12);
	ir3_reg_create(collect, 0, 0);
	for (i = 0; i < 4; i++) {
		if (tinf.src_wrmask & (1 << i))
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
					coord, src_swiz(coord, i));
		else if (tinf.src_wrmask & ~((1 << i) - 1))
			ir3_reg_create(collect, 0, 0);
	}

	/* Attach derivatives onto the end of the fan-in.  Derivatives start
	 * after the 4th argument, so make sure that fi is padded up to 4
	 * first.
	 */
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
		while (collect->regs_count < 5)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
		for (i = 0; i < tgt->dims; i++)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), dpdx, i);
		if (tgt->dims < 2)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
		for (i = 0; i < tgt->dims; i++)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), dpdy, i);
		if (tgt->dims < 2)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
		tinf.src_wrmask |= ((1 << (2 * MAX2(tgt->dims, 2))) - 1) << 4;
	}

	reg->instr = collect;
	reg->wrmask = tinf.src_wrmask;

	/* The second argument contains the offsets, followed by the lod/bias
	 * argument.  This is constructed more manually due to the dynamic
	 * nature.
	 */
	if (inst->Texture.NumOffsets == 0 && tinf.args == 1)
		return;

	reg = ir3_reg_create(instr, 0, IR3_REG_SSA);

	collect = ir3_instr_create2(ctx->block, -1, OPC_META_FI, 5);
	ir3_reg_create(collect, 0, 0);

	if (inst->Texture.NumOffsets) {
		for (i = 0; i < tgt->dims; i++)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
					offset, i);
		if (tgt->dims < 2)
			ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA), &zero, 0);
	}
	if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2)
		ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
				orig, orig->SwizzleX);
	else if (tinf.args > 1)
		ssa_src(ctx, ir3_reg_create(collect, 0, IR3_REG_SSA),
				orig, orig->SwizzleW);

	reg->instr = collect;
	reg->wrmask = (1 << (collect->regs_count - 1)) - 1;
}

static void
trans_txq(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *level = &inst->Src[0].Register;
	struct tgsi_src_register *samp = &inst->Src[1].Register;
	const struct target_info *tgt = &tex_targets[inst->Texture.Texture];
	struct tex_info tinf;

	memset(&tinf, 0, sizeof(tinf));
	fill_tex_info(ctx, inst, &tinf);
	if (is_rel_or_const(level))
		level = get_unconst(ctx, level);

	instr = instr_create(ctx, 5, OPC_GETSIZE);
	instr->cat5.type = get_utype(ctx);
	instr->cat5.samp = samp->Index;
	instr->cat5.tex = samp->Index;
	instr->flags |= tinf.flags;

	if (tgt->array && (dst->WriteMask & (1 << tgt->dims))) {
		/* Array size actually ends up in .w rather than .z.  This doesn't
		 * matter for miplevel 0, but for higher mips the value in z is
		 * minified whereas w stays.  Also, the value in TEX_CONST_3_DEPTH is
		 * returned, which means that we have to add 1 to it for arrays.
		 */
		struct tgsi_dst_register tmp_dst;
		struct tgsi_src_register *tmp_src;
		type_t type_mov = get_utype(ctx);

		tmp_src = get_internal_temp(ctx, &tmp_dst);
		add_dst_reg_wrmask(ctx, instr, &tmp_dst, 0,
				dst->WriteMask | TGSI_WRITEMASK_W);
		add_src_reg_wrmask(ctx, instr, level, level->SwizzleX, 0x1);

		if (dst->WriteMask & TGSI_WRITEMASK_X) {
			instr = instr_create(ctx, 1, 0);
			instr->cat1.src_type = type_mov;
			instr->cat1.dst_type = type_mov;
			add_dst_reg(ctx, instr, dst, 0);
			add_src_reg(ctx, instr, tmp_src, src_swiz(tmp_src, 0));
		}

		if (tgt->dims == 2) {
			if (dst->WriteMask & TGSI_WRITEMASK_Y) {
				instr = instr_create(ctx, 1, 0);
				instr->cat1.src_type = type_mov;
				instr->cat1.dst_type = type_mov;
				add_dst_reg(ctx, instr, dst, 1);
				add_src_reg(ctx, instr, tmp_src, src_swiz(tmp_src, 1));
			}
		}

		instr = instr_create(ctx, 2, OPC_ADD_U);
		add_dst_reg(ctx, instr, dst, tgt->dims);
		add_src_reg(ctx, instr, tmp_src, src_swiz(tmp_src, 3));
		ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 1;
	} else {
		add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask);
		add_src_reg_wrmask(ctx, instr, level, level->SwizzleX, 0x1);
	}

	if (dst->WriteMask & TGSI_WRITEMASK_W) {
		/* The # of levels comes from getinfo.z.  We need to add 1 to it,
		 * since the value in TEX_CONST_0 is zero-based.
		 */
		struct tgsi_dst_register tmp_dst;
		struct tgsi_src_register *tmp_src;

		tmp_src = get_internal_temp(ctx, &tmp_dst);
		instr = instr_create(ctx, 5, OPC_GETINFO);
		instr->cat5.type = get_utype(ctx);
		instr->cat5.samp = samp->Index;
		instr->cat5.tex = samp->Index;
		add_dst_reg_wrmask(ctx, instr, &tmp_dst, 0, TGSI_WRITEMASK_Z);

		instr = instr_create(ctx, 2, OPC_ADD_U);
		add_dst_reg(ctx, instr, dst, 3);
		add_src_reg(ctx, instr, tmp_src, src_swiz(tmp_src, 2));
		ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 1;
	}
}

/* DDX/DDY */
static void
trans_deriv(const struct instr_translater *t,
		struct ir3_compile_context *ctx,
		struct tgsi_full_instruction *inst)
{
	struct ir3_instruction *instr;
	struct tgsi_dst_register *dst = &inst->Dst[0].Register;
	struct tgsi_src_register *src = &inst->Src[0].Register;
	static const int8_t order[4] = {0, 1, 2, 3};

	if (!check_swiz(src, order)) {
		struct tgsi_dst_register tmp_dst;
		struct tgsi_src_register *tmp_src;

		tmp_src = get_internal_temp(ctx, &tmp_dst);
		create_mov(ctx, &tmp_dst, src);

		src = tmp_src;
	}

	/* This might be a workaround for hw bug?  Blob compiler always
	 * seems to work two components at a time for dsy/dsx.  It does
	 * actually seem to work in some cases (or at least some piglit
	 * tests) for four components at a time.  But seems more reliable
	 * to split this into two instructions like the blob compiler
	 * does:
	 */

	instr = instr_create(ctx, 5, t->opc);
	instr->cat5.type = get_ftype(ctx);
	add_dst_reg_wrmask(ctx, instr, dst, 0, dst->WriteMask & 0x3);
	add_src_reg_wrmask(ctx, instr, src, 0, dst->WriteMask & 0x3);

	instr = instr_create(ctx, 5, t->opc);
	instr->cat5.type = get_ftype(ctx);
	add_dst_reg_wrmask(ctx, instr, dst, 2, (dst->WriteMask >> 2) & 0x3);
	add_src_reg_wrmask(ctx, instr, src, 2, (dst->WriteMask >> 2) & 0x3);
}
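
/* e.g. "DDX TEMP[0].xyzw, IN[0]" thus emits one dsx for .xy and a
 * second dsx for .zw.
 */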
1762
1763 /*
1764 * SEQ(a,b) = (a == b) ? 1.0 : 0.0
1765 * cmps.f.eq tmp0, a, b
1766 * cov.u16f16 dst, tmp0
1767 *
1768 * SNE(a,b) = (a != b) ? 1.0 : 0.0
1769 * cmps.f.ne tmp0, a, b
1770 * cov.u16f16 dst, tmp0
1771 *
1772 * SGE(a,b) = (a >= b) ? 1.0 : 0.0
1773 * cmps.f.ge tmp0, a, b
1774 * cov.u16f16 dst, tmp0
1775 *
1776 * SLE(a,b) = (a <= b) ? 1.0 : 0.0
1777 * cmps.f.le tmp0, a, b
1778 * cov.u16f16 dst, tmp0
1779 *
1780 * SGT(a,b) = (a > b) ? 1.0 : 0.0
1781 * cmps.f.gt tmp0, a, b
1782 * cov.u16f16 dst, tmp0
1783 *
1784 * SLT(a,b) = (a < b) ? 1.0 : 0.0
1785 * cmps.f.lt tmp0, a, b
1786 * cov.u16f16 dst, tmp0
1787 *
1788 * CMP(a,b,c) = (a < 0.0) ? b : c
1789 * cmps.f.lt tmp0, a, {0.0}
1790 * sel.b16 dst, b, tmp0, c
1791 */
1792 static void
1793 trans_cmp(const struct instr_translater *t,
1794 struct ir3_compile_context *ctx,
1795 struct tgsi_full_instruction *inst)
1796 {
1797 struct ir3_instruction *instr;
1798 struct tgsi_dst_register tmp_dst;
1799 struct tgsi_src_register *tmp_src;
1800 struct tgsi_src_register constval0;
1801 /* final instruction for CMP() uses orig src1 and src2: */
1802 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1803 struct tgsi_src_register *a0, *a1, *a2;
1804 unsigned condition;
1805
1806 tmp_src = get_internal_temp(ctx, &tmp_dst);
1807
1808 a0 = &inst->Src[0].Register; /* a */
1809 a1 = &inst->Src[1].Register; /* b */
1810
1811 switch (t->tgsi_opc) {
1812 case TGSI_OPCODE_SEQ:
1813 case TGSI_OPCODE_FSEQ:
1814 condition = IR3_COND_EQ;
1815 break;
1816 case TGSI_OPCODE_SNE:
1817 case TGSI_OPCODE_FSNE:
1818 condition = IR3_COND_NE;
1819 break;
1820 case TGSI_OPCODE_SGE:
1821 case TGSI_OPCODE_FSGE:
1822 condition = IR3_COND_GE;
1823 break;
1824 case TGSI_OPCODE_SLT:
1825 case TGSI_OPCODE_FSLT:
1826 condition = IR3_COND_LT;
1827 break;
1828 case TGSI_OPCODE_SLE:
1829 condition = IR3_COND_LE;
1830 break;
1831 case TGSI_OPCODE_SGT:
1832 condition = IR3_COND_GT;
1833 break;
1834 case TGSI_OPCODE_CMP:
1835 get_immediate(ctx, &constval0, fui(0.0));
1836 a0 = &inst->Src[0].Register; /* a */
1837 a1 = &constval0; /* {0.0} */
1838 condition = IR3_COND_LT;
1839 break;
1840 default:
1841 compile_assert(ctx, 0);
1842 return;
1843 }
1844
1845 if (is_const(a0) && is_const(a1))
1846 a0 = get_unconst(ctx, a0);
1847
1848 /* cmps.f.<cond> tmp, a0, a1 */
1849 instr = instr_create(ctx, 2, OPC_CMPS_F);
1850 instr->cat2.condition = condition;
1851 vectorize(ctx, instr, &tmp_dst, 2, a0, 0, a1, 0);
1852
1853 switch (t->tgsi_opc) {
1854 case TGSI_OPCODE_SEQ:
1855 case TGSI_OPCODE_SGE:
1856 case TGSI_OPCODE_SLE:
1857 case TGSI_OPCODE_SNE:
1858 case TGSI_OPCODE_SGT:
1859 case TGSI_OPCODE_SLT:
1860 /* cov.u16f16 dst, tmp0 */
1861 instr = instr_create(ctx, 1, 0);
1862 instr->cat1.src_type = get_utype(ctx);
1863 instr->cat1.dst_type = get_ftype(ctx);
1864 vectorize(ctx, instr, dst, 1, tmp_src, 0);
1865 break;
1866 case TGSI_OPCODE_FSEQ:
1867 case TGSI_OPCODE_FSGE:
1868 case TGSI_OPCODE_FSNE:
1869 case TGSI_OPCODE_FSLT:
1870 /* absneg.s dst, (neg)tmp0 */
1871 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
1872 vectorize(ctx, instr, dst, 1, tmp_src, IR3_REG_NEGATE);
1873 break;
1874 case TGSI_OPCODE_CMP:
1875 a1 = &inst->Src[1].Register;
1876 a2 = &inst->Src[2].Register;
1877 /* sel.{b32,b16} dst, src1, tmp, src2 */
1878 instr = instr_create(ctx, 3, OPC_SEL_B32);
1879 vectorize(ctx, instr, dst, 3, a1, 0, tmp_src, 0, a2, 0);
1880
1881 break;
1882 }
1883
1884 put_dst(ctx, inst, dst);
1885 }
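
/* Sketching the float-result lowering above in scalar C (illustration
 * only), e.g. for SLT:
 *
 *    unsigned tmp = (a < b) ? 1 : 0;   // cmps.f.lt
 *    float dst = (float)tmp;           // cov.u16f16: 0/1 -> 0.0/1.0
 *
 * (the FSxx variants instead negate the 0/1 predicate to 0/~0, see
 * trans_icmp below for the same trick)
 */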
1886
1887 /*
1888 * USNE(a,b) = (a != b) ? ~0 : 0
1889 * cmps.u32.ne dst, a, b
1890 *
1891 * USEQ(a,b) = (a == b) ? ~0 : 0
1892 * cmps.u32.eq dst, a, b
1893 *
1894 * ISGE(a,b) = (a >= b) ? ~0 : 0
1895 * cmps.s32.ge dst, a, b
1896 *
1897 * USGE(a,b) = (a >= b) ? ~0 : 0
1898 * cmps.u32.ge dst, a, b
1899 *
1900 * ISLT(a,b) = (a < b) ? ~0 : 0
1901 * cmps.s32.lt dst, a, b
1902 *
1903 * USLT(a,b) = (a < b) ? ~0 : 0
1904 * cmps.u32.lt dst, a, b
1905 *
1906 */
1907 static void
1908 trans_icmp(const struct instr_translater *t,
1909 struct ir3_compile_context *ctx,
1910 struct tgsi_full_instruction *inst)
1911 {
1912 struct ir3_instruction *instr;
1913 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1914 struct tgsi_dst_register tmp_dst;
1915 struct tgsi_src_register *tmp_src;
1916 struct tgsi_src_register *a0, *a1;
1917 unsigned condition;
1918
1919 a0 = &inst->Src[0].Register; /* a */
1920 a1 = &inst->Src[1].Register; /* b */
1921
1922 switch (t->tgsi_opc) {
1923 case TGSI_OPCODE_USNE:
1924 condition = IR3_COND_NE;
1925 break;
1926 case TGSI_OPCODE_USEQ:
1927 condition = IR3_COND_EQ;
1928 break;
1929 case TGSI_OPCODE_ISGE:
1930 case TGSI_OPCODE_USGE:
1931 condition = IR3_COND_GE;
1932 break;
1933 case TGSI_OPCODE_ISLT:
1934 case TGSI_OPCODE_USLT:
1935 condition = IR3_COND_LT;
1936 break;
1937
1938 default:
1939 compile_assert(ctx, 0);
1940 return;
1941 }
1942
1943 if (is_const(a0) && is_const(a1))
1944 a0 = get_unconst(ctx, a0);
1945
1946 tmp_src = get_internal_temp(ctx, &tmp_dst);
1947 /* cmps.{u32,s32}.<cond> tmp, a0, a1 */
1948 instr = instr_create(ctx, 2, t->opc);
1949 instr->cat2.condition = condition;
1950 vectorize(ctx, instr, &tmp_dst, 2, a0, 0, a1, 0);
1951
1952 /* absneg.s dst, (neg)tmp */
1953 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
1954 vectorize(ctx, instr, dst, 1, tmp_src, IR3_REG_NEGATE);
1955
1956 put_dst(ctx, inst, dst);
1957 }
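
/* i.e. in scalar C terms: cmps yields a 0/1 predicate, and negating
 * it gives the 0/~0 result TGSI integer comparisons require, since
 * -1 in two's complement is all ones:
 *
 *    int32_t tmp = (a < b);   // cmps.s32.lt -> 0 or 1
 *    int32_t dst = -tmp;      // absneg.s (neg) -> 0 or 0xffffffff
 */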
1958
1959 /*
1960 * UCMP(a,b,c) = a ? b : c
1961 * sel.b16 dst, b, a, c
1962 */
1963 static void
1964 trans_ucmp(const struct instr_translater *t,
1965 struct ir3_compile_context *ctx,
1966 struct tgsi_full_instruction *inst)
1967 {
1968 struct ir3_instruction *instr;
1969 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1970 struct tgsi_src_register *a0, *a1, *a2;
1971
1972 a0 = &inst->Src[0].Register; /* a */
1973 a1 = &inst->Src[1].Register; /* b */
1974 a2 = &inst->Src[2].Register; /* c */
1975
1976 if (is_rel_or_const(a0))
1977 a0 = get_unconst(ctx, a0);
1978
1979 /* sel.{b32,b16} dst, b, a, c */
1980 instr = instr_create(ctx, 3, OPC_SEL_B32);
1981 vectorize(ctx, instr, dst, 3, a1, 0, a0, 0, a2, 0);
1982 put_dst(ctx, inst, dst);
1983 }
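
/* (so sel.b apparently takes the condition as its *middle* src: dst
 * gets the first src when the condition is non-zero, else the third..
 * the CMP case in trans_cmp above relies on the same ordering)
 */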
1984
1985 /*
1986 * ISSG(a) = a < 0 ? -1 : a > 0 ? 1 : 0
1987 * cmps.s.lt tmp_neg, a, 0 # 1 if a is negative
1988 * cmps.s.gt tmp_pos, a, 0 # 1 if a is positive
1989 * sub.u dst, tmp_pos, tmp_neg
1990 */
1991 static void
1992 trans_issg(const struct instr_translater *t,
1993 struct ir3_compile_context *ctx,
1994 struct tgsi_full_instruction *inst)
1995 {
1996 struct ir3_instruction *instr;
1997 struct tgsi_dst_register *dst = get_dst(ctx, inst);
1998 struct tgsi_src_register *a = &inst->Src[0].Register;
1999 struct tgsi_dst_register neg_dst, pos_dst;
2000 struct tgsi_src_register *neg_src, *pos_src;
2001
2002 neg_src = get_internal_temp(ctx, &neg_dst);
2003 pos_src = get_internal_temp(ctx, &pos_dst);
2004
2005 /* cmps.s.lt neg, a, 0 */
2006 instr = instr_create(ctx, 2, OPC_CMPS_S);
2007 instr->cat2.condition = IR3_COND_LT;
2008 vectorize(ctx, instr, &neg_dst, 2, a, 0, 0, IR3_REG_IMMED);
2009
2010 /* cmps.s.gt pos, a, 0 */
2011 instr = instr_create(ctx, 2, OPC_CMPS_S);
2012 instr->cat2.condition = IR3_COND_GT;
2013 vectorize(ctx, instr, &pos_dst, 2, a, 0, 0, IR3_REG_IMMED);
2014
2015 /* sub.u dst, pos, neg */
2016 instr = instr_create(ctx, 2, OPC_SUB_U);
2017 vectorize(ctx, instr, dst, 2, pos_src, 0, neg_src, 0);
2018
2019 put_dst(ctx, inst, dst);
2020 }
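
/* Equivalent scalar C for reference.. both predicates are 0/1, so
 * their difference is exactly -1/0/+1:
 *
 *    int issg(int a) { return (a > 0) - (a < 0); }
 */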
2021
2024 /*
2025 * Conditional / Flow control
2026 */
2027
2028 static void
2029 push_branch(struct ir3_compile_context *ctx, bool inv,
2030 struct ir3_instruction *instr, struct ir3_instruction *cond)
2031 {
2032 unsigned int idx = ctx->branch_count++;
2033 compile_assert(ctx, idx < ARRAY_SIZE(ctx->branch));
2034 ctx->branch[idx].instr = instr;
2035 ctx->branch[idx].inv = inv;
2036 /* else side of branch has same condition: */
2037 if (!inv)
2038 ctx->branch[idx].cond = cond;
2039 }
2040
2041 static struct ir3_instruction *
2042 pop_branch(struct ir3_compile_context *ctx)
2043 {
2044 unsigned int idx = --ctx->branch_count;
2045 return ctx->branch[idx].instr;
2046 }
2047
2048 static void
2049 trans_if(const struct instr_translater *t,
2050 struct ir3_compile_context *ctx,
2051 struct tgsi_full_instruction *inst)
2052 {
2053 struct ir3_instruction *instr, *cond;
2054 struct tgsi_src_register *src = &inst->Src[0].Register;
2055 struct tgsi_dst_register tmp_dst;
2056 struct tgsi_src_register *tmp_src;
2057 struct tgsi_src_register constval;
2058
2059 get_immediate(ctx, &constval, fui(0.0));
2060 tmp_src = get_internal_temp(ctx, &tmp_dst);
2061
2062 if (is_const(src))
2063 src = get_unconst(ctx, src);
2064
2065 /* cmps.{f,u}.ne tmp0, b, {0.0} */
2066 instr = instr_create(ctx, 2, t->opc);
2067 add_dst_reg(ctx, instr, &tmp_dst, 0);
2068 add_src_reg(ctx, instr, src, src->SwizzleX);
2069 add_src_reg(ctx, instr, &constval, constval.SwizzleX);
2070 instr->cat2.condition = IR3_COND_NE;
2071
2072 compile_assert(ctx, instr->regs[1]->flags & IR3_REG_SSA); /* because get_unconst() */
2073 cond = instr->regs[1]->instr;
2074
2075 /* meta:flow tmp0 */
2076 instr = instr_create(ctx, -1, OPC_META_FLOW);
2077 ir3_reg_create(instr, 0, 0); /* dummy dst */
2078 add_src_reg(ctx, instr, tmp_src, TGSI_SWIZZLE_X);
2079
2080 push_branch(ctx, false, instr, cond);
2081 instr->flow.if_block = push_block(ctx);
2082 }
2083
2084 static void
2085 trans_else(const struct instr_translater *t,
2086 struct ir3_compile_context *ctx,
2087 struct tgsi_full_instruction *inst)
2088 {
2089 struct ir3_instruction *instr;
2090
2091 pop_block(ctx);
2092
2093 instr = pop_branch(ctx);
2094
2095 compile_assert(ctx, (instr->category == -1) &&
2096 (instr->opc == OPC_META_FLOW));
2097
2098 push_branch(ctx, true, instr, NULL);
2099 instr->flow.else_block = push_block(ctx);
2100 }
2101
2102 static struct ir3_instruction *
2103 find_temporary(struct ir3_block *block, unsigned n)
2104 {
2105 if (block->parent && !block->temporaries[n])
2106 return find_temporary(block->parent, n);
2107 return block->temporaries[n];
2108 }
2109
2110 static struct ir3_instruction *
2111 find_output(struct ir3_block *block, unsigned n)
2112 {
2113 if (block->parent && !block->outputs[n])
2114 return find_output(block->parent, n);
2115 return block->outputs[n];
2116 }
2117
2118 static struct ir3_instruction *
2119 create_phi(struct ir3_compile_context *ctx, struct ir3_instruction *cond,
2120 struct ir3_instruction *a, struct ir3_instruction *b)
2121 {
2122 struct ir3_instruction *phi;
2123
2124 compile_assert(ctx, cond);
2125
2126 /* Either side of the condition could be null.. which
2127 * indicates a variable written on only one side of the
2128 * branch. Normally this should only be variables not
2129 * used outside of that side of the branch. So we could
2130 * just 'return a ? a : b;' in that case. But for better
2131 * defined undefined behavior we just stick in imm{0.0}.
2132 * In the common case of a value only used within the
2133 * one side of the branch, the PHI instruction will not
2134 * get scheduled.
2135 */
2136 if (!a)
2137 a = create_immed(ctx, 0.0);
2138 if (!b)
2139 b = create_immed(ctx, 0.0);
2140
2141 phi = instr_create(ctx, -1, OPC_META_PHI);
2142 ir3_reg_create(phi, 0, 0); /* dummy dst */
2143 ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = cond;
2144 ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = a;
2145 ir3_reg_create(phi, 0, IR3_REG_SSA)->instr = b;
2146
2147 return phi;
2148 }
2149
2150 static void
2151 trans_endif(const struct instr_translater *t,
2152 struct ir3_compile_context *ctx,
2153 struct tgsi_full_instruction *inst)
2154 {
2155 struct ir3_instruction *instr;
2156 struct ir3_block *ifb, *elseb;
2157 struct ir3_instruction **ifout, **elseout;
2158 unsigned i, ifnout = 0, elsenout = 0;
2159
2160 pop_block(ctx);
2161
2162 instr = pop_branch(ctx);
2163
2164 compile_assert(ctx, (instr->category == -1) &&
2165 (instr->opc == OPC_META_FLOW));
2166
2167 ifb = instr->flow.if_block;
2168 elseb = instr->flow.else_block;
2169 /* if there is no else block, the parent block is used for the
2170 * branch-not-taken src of the PHI instructions:
2171 */
2172 if (!elseb)
2173 elseb = ifb->parent;
2174
2175 /* worst case sizes: */
2176 ifnout = ifb->ntemporaries + ifb->noutputs;
2177 elsenout = elseb->ntemporaries + elseb->noutputs;
2178
2179 ifout = ir3_alloc(ctx->ir, sizeof(ifb->outputs[0]) * ifnout);
2180 if (elseb != ifb->parent)
2181 elseout = ir3_alloc(ctx->ir, sizeof(ifb->outputs[0]) * elsenout);
2182
2183 ifnout = 0;
2184 elsenout = 0;
2185
2186 /* generate PHI instructions for any temporaries written: */
2187 for (i = 0; i < ifb->ntemporaries; i++) {
2188 struct ir3_instruction *a = ifb->temporaries[i];
2189 struct ir3_instruction *b = elseb->temporaries[i];
2190
2191 /* if temporary written in if-block, or if else block
2192 * is present and temporary written in else-block:
2193 */
2194 if (a || ((elseb != ifb->parent) && b)) {
2195 struct ir3_instruction *phi;
2196
2197 /* if only written on one side, find the closest
2198 * enclosing update on other side:
2199 */
2200 if (!a)
2201 a = find_temporary(ifb, i);
2202 if (!b)
2203 b = find_temporary(elseb, i);
2204
2205 ifout[ifnout] = a;
2206 a = create_output(ifb, a, ifnout++);
2207
2208 if (elseb != ifb->parent) {
2209 elseout[elsenout] = b;
2210 b = create_output(elseb, b, elsenout++);
2211 }
2212
2213 phi = create_phi(ctx, instr, a, b);
2214 ctx->block->temporaries[i] = phi;
2215 }
2216 }
2217
2218 compile_assert(ctx, ifb->noutputs == elseb->noutputs);
2219
2220 /* .. and any outputs written: */
2221 for (i = 0; i < ifb->noutputs; i++) {
2222 struct ir3_instruction *a = ifb->outputs[i];
2223 struct ir3_instruction *b = elseb->outputs[i];
2224
2225 /* if output written in if-block, or if else block
2226 * is present and output written in else-block:
2227 */
2228 if (a || ((elseb != ifb->parent) && b)) {
2229 struct ir3_instruction *phi;
2230
2231 /* if only written on one side, find the closest
2232 * enclosing update on other side:
2233 */
2234 if (!a)
2235 a = find_output(ifb, i);
2236 if (!b)
2237 b = find_output(elseb, i);
2238
2239 ifout[ifnout] = a;
2240 a = create_output(ifb, a, ifnout++);
2241
2242 if (elseb != ifb->parent) {
2243 elseout[elsenout] = b;
2244 b = create_output(elseb, b, elsenout++);
2245 }
2246
2247 phi = create_phi(ctx, instr, a, b);
2248 ctx->block->outputs[i] = phi;
2249 }
2250 }
2251
2252 ifb->noutputs = ifnout;
2253 ifb->outputs = ifout;
2254
2255 if (elseb != ifb->parent) {
2256 elseb->noutputs = elsenout;
2257 elseb->outputs = elseout;
2258 }
2259
2260 // TODO maybe we want to compact block->inputs?
2261 }
2262
2263 /*
2264 * Kill
2265 */
2266
2267 static void
2268 trans_kill(const struct instr_translater *t,
2269 struct ir3_compile_context *ctx,
2270 struct tgsi_full_instruction *inst)
2271 {
2272 struct ir3_instruction *instr, *immed, *cond = NULL;
2273 bool inv = false;
2274
2275 /* unconditional kill, use enclosing if condition: */
2276 if (ctx->branch_count > 0) {
2277 unsigned int idx = ctx->branch_count - 1;
2278 cond = ctx->branch[idx].cond;
2279 inv = ctx->branch[idx].inv;
2280 } else {
2281 cond = create_immed(ctx, 1.0);
2282 }
2283
2284 compile_assert(ctx, cond);
2285
2286 immed = create_immed(ctx, 0.0);
2287
2288 /* cmps.f.ne p0.x, cond, {0.0} */
2289 instr = instr_create(ctx, 2, OPC_CMPS_F);
2290 instr->cat2.condition = IR3_COND_NE;
2291 ir3_reg_create(instr, regid(REG_P0, 0), 0);
2292 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
2293 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = immed;
2294 cond = instr;
2295
2296 /* kill p0.x */
2297 instr = instr_create(ctx, 0, OPC_KILL);
2298 instr->cat0.inv = inv;
2299 ir3_reg_create(instr, 0, 0); /* dummy dst */
2300 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
2301
2302 ctx->kill[ctx->kill_count++] = instr;
2303
2304 ctx->so->has_kill = true;
2305 }
2306
2307 /*
2308 * Kill-If
2309 */
2310
2311 static void
2312 trans_killif(const struct instr_translater *t,
2313 struct ir3_compile_context *ctx,
2314 struct tgsi_full_instruction *inst)
2315 {
2316 struct tgsi_src_register *src = &inst->Src[0].Register;
2317 struct ir3_instruction *instr, *immed, *cond = NULL;
2318 bool inv = false;
2319
2320 immed = create_immed(ctx, 0.0);
2321
2322 /* cmps.f.ne p0.x, cond, {0.0} */
2323 instr = instr_create(ctx, 2, OPC_CMPS_F);
2324 instr->cat2.condition = IR3_COND_NE;
2325 ir3_reg_create(instr, regid(REG_P0, 0), 0);
2326 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = immed;
2327 add_src_reg(ctx, instr, src, src->SwizzleX);
2328
2329 cond = instr;
2330
2331 /* kill p0.x */
2332 instr = instr_create(ctx, 0, OPC_KILL);
2333 instr->cat0.inv = inv;
2334 ir3_reg_create(instr, 0, 0); /* dummy dst */
2335 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = cond;
2336
2337 ctx->kill[ctx->kill_count++] = instr;
2338
2339 ctx->so->has_kill = true;
2340 }
2341 
2342 /*
2343 * I2F / U2F / F2I / F2U
2344 */
2345
2346 static void
2347 trans_cov(const struct instr_translater *t,
2348 struct ir3_compile_context *ctx,
2349 struct tgsi_full_instruction *inst)
2350 {
2351 struct ir3_instruction *instr;
2352 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2353 struct tgsi_src_register *src = &inst->Src[0].Register;
2354
2355 /* cov.<src_type><dst_type> dst, src */
2356 instr = instr_create(ctx, 1, 0);
2357 switch (t->tgsi_opc) {
2358 case TGSI_OPCODE_U2F:
2359 instr->cat1.src_type = TYPE_U32;
2360 instr->cat1.dst_type = TYPE_F32;
2361 break;
2362 case TGSI_OPCODE_I2F:
2363 instr->cat1.src_type = TYPE_S32;
2364 instr->cat1.dst_type = TYPE_F32;
2365 break;
2366 case TGSI_OPCODE_F2U:
2367 instr->cat1.src_type = TYPE_F32;
2368 instr->cat1.dst_type = TYPE_U32;
2369 break;
2370 case TGSI_OPCODE_F2I:
2371 instr->cat1.src_type = TYPE_F32;
2372 instr->cat1.dst_type = TYPE_S32;
2373 break;
2374
2375 }
2376 vectorize(ctx, instr, dst, 1, src, 0);
2377 put_dst(ctx, inst, dst);
2378 }
2379
2380 /*
2381 * UMUL / UMAD
2382 *
2383 * There is no 32-bit multiply instruction, so splitting a and b into low and
2384 * high 16-bit halves (and dropping ah*bh, which shifts entirely out of the low 32 bits), we get
2385 *
2386 * dst = al*bl + ((ah*bl) << 16) + ((al*bh) << 16)
2387 *
2388 * mull.u tmp0, a, b (mul low, i.e. al * bl)
2389 * madsh.m16 tmp1, a, b, tmp0 (mul-add shift high mix, i.e. ah * bl << 16)
2390 * madsh.m16 dst, b, a, tmp1 (i.e. al * bh << 16)
2391 *
2392 * For UMAD, add in the extra argument after mull.u.
2393 */
2394 static void
2395 trans_umul(const struct instr_translater *t,
2396 struct ir3_compile_context *ctx,
2397 struct tgsi_full_instruction *inst)
2398 {
2399 struct ir3_instruction *instr;
2400 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2401 struct tgsi_src_register *a = &inst->Src[0].Register;
2402 struct tgsi_src_register *b = &inst->Src[1].Register;
2403
2404 struct tgsi_dst_register tmp0_dst, tmp1_dst;
2405 struct tgsi_src_register *tmp0_src, *tmp1_src;
2406
2407 tmp0_src = get_internal_temp(ctx, &tmp0_dst);
2408 tmp1_src = get_internal_temp(ctx, &tmp1_dst);
2409
2410 if (is_rel_or_const(a))
2411 a = get_unconst(ctx, a);
2412 if (is_rel_or_const(b))
2413 b = get_unconst(ctx, b);
2414
2415 /* mull.u tmp0, a, b */
2416 instr = instr_create(ctx, 2, OPC_MULL_U);
2417 vectorize(ctx, instr, &tmp0_dst, 2, a, 0, b, 0);
2418
2419 if (t->tgsi_opc == TGSI_OPCODE_UMAD) {
2420 struct tgsi_src_register *c = &inst->Src[2].Register;
2421
2422 /* add.u tmp0, tmp0, c */
2423 instr = instr_create(ctx, 2, OPC_ADD_U);
2424 vectorize(ctx, instr, &tmp0_dst, 2, tmp0_src, 0, c, 0);
2425 }
2426
2427 /* madsh.m16 tmp1, a, b, tmp0 */
2428 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2429 vectorize(ctx, instr, &tmp1_dst, 3, a, 0, b, 0, tmp0_src, 0);
2430
2431 /* madsh.m16 dst, b, a, tmp1 */
2432 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2433 vectorize(ctx, instr, dst, 3, b, 0, a, 0, tmp1_src, 0);
2434 put_dst(ctx, inst, dst);
2435 }
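
/* Host-side C sketch of the identity the three instructions compute
 * (illustration; umul32() is a made-up name, and madsh.m16 is modelled
 * per the comment above):
 *
 *    static uint32_t umul32(uint32_t a, uint32_t b)
 *    {
 *        uint32_t al = a & 0xffff, ah = a >> 16;
 *        uint32_t bl = b & 0xffff, bh = b >> 16;
 *        uint32_t t0 = al * bl;                  // mull.u
 *        uint32_t t1 = t0 + ((ah * bl) << 16);   // madsh.m16
 *        return t1 + ((al * bh) << 16);          // madsh.m16
 *    }
 *
 * which equals (uint32_t)(a * b) for all inputs, since the missing
 * ah*bh term shifts entirely out of the low 32 bits.
 */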
2436
2437 /*
2438 * IDIV / UDIV / MOD / UMOD
2439 *
2440 * See NV50LegalizeSSA::handleDIV for the origin of this implementation. For
2441 * MOD/UMOD, it becomes a - [IU]DIV(a, modulus) * modulus.
2442 */
2443 static void
2444 trans_idiv(const struct instr_translater *t,
2445 struct ir3_compile_context *ctx,
2446 struct tgsi_full_instruction *inst)
2447 {
2448 struct ir3_instruction *instr;
2449 struct tgsi_dst_register *dst = get_dst(ctx, inst), *premod_dst = dst;
2450 struct tgsi_src_register *a = &inst->Src[0].Register;
2451 struct tgsi_src_register *b = &inst->Src[1].Register;
2452
2453 struct tgsi_dst_register af_dst, bf_dst, q_dst, r_dst, a_dst, b_dst;
2454 struct tgsi_src_register *af_src, *bf_src, *q_src, *r_src, *a_src, *b_src;
2455
2456 struct tgsi_src_register negative_2, thirty_one;
2457 type_t src_type;
2458
2459 if (t->tgsi_opc == TGSI_OPCODE_IDIV || t->tgsi_opc == TGSI_OPCODE_MOD)
2460 src_type = get_stype(ctx);
2461 else
2462 src_type = get_utype(ctx);
2463
2464 af_src = get_internal_temp(ctx, &af_dst);
2465 bf_src = get_internal_temp(ctx, &bf_dst);
2466 q_src = get_internal_temp(ctx, &q_dst);
2467 r_src = get_internal_temp(ctx, &r_dst);
2468 a_src = get_internal_temp(ctx, &a_dst);
2469 b_src = get_internal_temp(ctx, &b_dst);
2470
2471 get_immediate(ctx, &negative_2, -2);
2472 get_immediate(ctx, &thirty_one, 31);
2473
2474 if (t->tgsi_opc == TGSI_OPCODE_MOD || t->tgsi_opc == TGSI_OPCODE_UMOD)
2475 premod_dst = &q_dst;
2476
2477 /* cov.[us]32f32 af, numerator */
2478 instr = instr_create(ctx, 1, 0);
2479 instr->cat1.src_type = src_type;
2480 instr->cat1.dst_type = get_ftype(ctx);
2481 vectorize(ctx, instr, &af_dst, 1, a, 0);
2482
2483 /* cov.[us]32f32 bf, denominator */
2484 instr = instr_create(ctx, 1, 0);
2485 instr->cat1.src_type = src_type;
2486 instr->cat1.dst_type = get_ftype(ctx);
2487 vectorize(ctx, instr, &bf_dst, 1, b, 0);
2488
2489 /* Get the absolute values for IDIV */
2490 if (type_sint(src_type)) {
2491 /* absneg.f af, (abs)af */
2492 instr = instr_create(ctx, 2, OPC_ABSNEG_F);
2493 vectorize(ctx, instr, &af_dst, 1, af_src, IR3_REG_ABS);
2494
2495 /* absneg.f bf, (abs)bf */
2496 instr = instr_create(ctx, 2, OPC_ABSNEG_F);
2497 vectorize(ctx, instr, &bf_dst, 1, bf_src, IR3_REG_ABS);
2498
2499 /* absneg.s a, (abs)numerator */
2500 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
2501 vectorize(ctx, instr, &a_dst, 1, a, IR3_REG_ABS);
2502
2503 /* absneg.s b, (abs)denominator */
2504 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
2505 vectorize(ctx, instr, &b_dst, 1, b, IR3_REG_ABS);
2506 } else {
2507 /* mov.u32u32 a, numerator */
2508 instr = instr_create(ctx, 1, 0);
2509 instr->cat1.src_type = src_type;
2510 instr->cat1.dst_type = src_type;
2511 vectorize(ctx, instr, &a_dst, 1, a, 0);
2512
2513 /* mov.u32u32 b, denominator */
2514 instr = instr_create(ctx, 1, 0);
2515 instr->cat1.src_type = src_type;
2516 instr->cat1.dst_type = src_type;
2517 vectorize(ctx, instr, &b_dst, 1, b, 0);
2518 }
2519
2520 /* rcp.f bf, bf */
2521 instr = instr_create(ctx, 4, OPC_RCP);
2522 vectorize(ctx, instr, &bf_dst, 1, bf_src, 0);
2523
2524 /* That's right, subtract 2 as an integer from the float (presumably nudging the rcp estimate down a couple ulps, so the quotient estimate below errs low) */
2525 /* add.u bf, bf, -2 */
2526 instr = instr_create(ctx, 2, OPC_ADD_U);
2527 vectorize(ctx, instr, &bf_dst, 2, bf_src, 0, &negative_2, 0);
2528
2529 /* mul.f q, af, bf */
2530 instr = instr_create(ctx, 2, OPC_MUL_F);
2531 vectorize(ctx, instr, &q_dst, 2, af_src, 0, bf_src, 0);
2532
2533 /* cov.f32[us]32 q, q */
2534 instr = instr_create(ctx, 1, 0);
2535 instr->cat1.src_type = get_ftype(ctx);
2536 instr->cat1.dst_type = src_type;
2537 vectorize(ctx, instr, &q_dst, 1, q_src, 0);
2538
2539 /* integer multiply q by b */
2540 /* mull.u r, q, b */
2541 instr = instr_create(ctx, 2, OPC_MULL_U);
2542 vectorize(ctx, instr, &r_dst, 2, q_src, 0, b_src, 0);
2543
2544 /* madsh.m16 r, q, b, r */
2545 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2546 vectorize(ctx, instr, &r_dst, 3, q_src, 0, b_src, 0, r_src, 0);
2547
2548 /* madsh.m16 r, b, q, r */
2549 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2550 vectorize(ctx, instr, &r_dst, 3, b_src, 0, q_src, 0, r_src, 0);
2551
2552 /* sub.u r, a, r */
2553 instr = instr_create(ctx, 2, OPC_SUB_U);
2554 vectorize(ctx, instr, &r_dst, 2, a_src, 0, r_src, 0);
2555
2556 /* cov.u32f32 r, r */
2557 instr = instr_create(ctx, 1, 0);
2558 instr->cat1.src_type = get_utype(ctx);
2559 instr->cat1.dst_type = get_ftype(ctx);
2560 vectorize(ctx, instr, &r_dst, 1, r_src, 0);
2561
2562 /* mul.f r, r, bf */
2563 instr = instr_create(ctx, 2, OPC_MUL_F);
2564 vectorize(ctx, instr, &r_dst, 2, r_src, 0, bf_src, 0);
2565
2566 /* cov.f32u32 r, r */
2567 instr = instr_create(ctx, 1, 0);
2568 instr->cat1.src_type = get_ftype(ctx);
2569 instr->cat1.dst_type = get_utype(ctx);
2570 vectorize(ctx, instr, &r_dst, 1, r_src, 0);
2571
2572 /* add.u q, q, r */
2573 instr = instr_create(ctx, 2, OPC_ADD_U);
2574 vectorize(ctx, instr, &q_dst, 2, q_src, 0, r_src, 0);
2575
2576 /* mull.u r, q, b */
2577 instr = instr_create(ctx, 2, OPC_MULL_U);
2578 vectorize(ctx, instr, &r_dst, 2, q_src, 0, b_src, 0);
2579
2580 /* madsh.m16 r, q, b, r */
2581 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2582 vectorize(ctx, instr, &r_dst, 3, q_src, 0, b_src, 0, r_src, 0);
2583
2584 /* madsh.m16 r, b, q, r */
2585 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2586 vectorize(ctx, instr, &r_dst, 3, b_src, 0, q_src, 0, r_src, 0);
2587
2588 /* sub.u r, a, r */
2589 instr = instr_create(ctx, 2, OPC_SUB_U);
2590 vectorize(ctx, instr, &r_dst, 2, a_src, 0, r_src, 0);
2591
2592 /* cmps.u.ge r, r, b */
2593 instr = instr_create(ctx, 2, OPC_CMPS_U);
2594 instr->cat2.condition = IR3_COND_GE;
2595 vectorize(ctx, instr, &r_dst, 2, r_src, 0, b_src, 0);
2596
2597 if (type_uint(src_type)) {
2598 /* add.u dst, q, r */
2599 instr = instr_create(ctx, 2, OPC_ADD_U);
2600 vectorize(ctx, instr, premod_dst, 2, q_src, 0, r_src, 0);
2601 } else {
2602 /* add.u q, q, r */
2603 instr = instr_create(ctx, 2, OPC_ADD_U);
2604 vectorize(ctx, instr, &q_dst, 2, q_src, 0, r_src, 0);
2605
2606 /* negate result based on the original arguments */
2607 if (is_const(a) && is_const(b))
2608 a = get_unconst(ctx, a);
2609
2610 /* xor.b r, numerator, denominator */
2611 instr = instr_create(ctx, 2, OPC_XOR_B);
2612 vectorize(ctx, instr, &r_dst, 2, a, 0, b, 0);
2613
2614 /* shr.b r, r, 31 */
2615 instr = instr_create(ctx, 2, OPC_SHR_B);
2616 vectorize(ctx, instr, &r_dst, 2, r_src, 0, &thirty_one, 0);
2617
2618 /* absneg.s b, (neg)q */
2619 instr = instr_create(ctx, 2, OPC_ABSNEG_S);
2620 vectorize(ctx, instr, &b_dst, 1, q_src, IR3_REG_NEGATE);
2621
2622 /* sel.b dst, b, r, q */
2623 instr = instr_create(ctx, 3, OPC_SEL_B32);
2624 vectorize(ctx, instr, premod_dst, 3, b_src, 0, r_src, 0, q_src, 0);
2625 }
2626
2627 if (t->tgsi_opc == TGSI_OPCODE_MOD || t->tgsi_opc == TGSI_OPCODE_UMOD) {
2628 /* The division result will have ended up in q. */
2629
2630 if (is_rel_or_const(b))
2631 b = get_unconst(ctx, b);
2632
2633 /* mull.u r, q, b */
2634 instr = instr_create(ctx, 2, OPC_MULL_U);
2635 vectorize(ctx, instr, &r_dst, 2, q_src, 0, b, 0);
2636
2637 /* madsh.m16 r, q, b, r */
2638 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2639 vectorize(ctx, instr, &r_dst, 3, q_src, 0, b, 0, r_src, 0);
2640
2641 /* madsh.m16 r, b, q, r */
2642 instr = instr_create(ctx, 3, OPC_MADSH_M16);
2643 vectorize(ctx, instr, &r_dst, 3, b, 0, q_src, 0, r_src, 0);
2644
2645 /* sub.u dst, a, r */
2646 instr = instr_create(ctx, 2, OPC_SUB_U);
2647 vectorize(ctx, instr, dst, 2, a, 0, r_src, 0);
2648 }
2649
2650 put_dst(ctx, inst, dst);
2651 }
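
/* A rough host-side C model of the unsigned path above (illustration
 * only; udiv32() is a made-up name, and the exact accuracy guarantees
 * come from the hw's rcp/float behavior):
 *
 *    static uint32_t udiv32(uint32_t a, uint32_t b)
 *    {
 *        float af = (float)a, bf = 1.0f / (float)b;
 *        uint32_t bits, q, r;
 *        memcpy(&bits, &bf, 4);   // "subtract 2 as an integer from
 *        bits -= 2;               //  the float": nudge the estimate
 *        memcpy(&bf, &bits, 4);   //  down so q errs low
 *        q = (uint32_t)(af * bf);          // underestimated quotient
 *        r = a - q * b;
 *        q += (uint32_t)((float)r * bf);   // refine once
 *        r = a - q * b;
 *        return q + (r >= b);              // final +0/+1 correction
 *    }
 *
 * the signed path additionally works on absolute values and patches
 * the sign back in at the end via the xor/shr/sel sequence.
 */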
2652
2653 /*
2654 * Handlers for TGSI instructions which do have 1:1 mapping to native
2655 * instructions:
2656 */
2657
2658 static void
2659 instr_cat0(const struct instr_translater *t,
2660 struct ir3_compile_context *ctx,
2661 struct tgsi_full_instruction *inst)
2662 {
2663 instr_create(ctx, 0, t->opc);
2664 }
2665
2666 static void
2667 instr_cat1(const struct instr_translater *t,
2668 struct ir3_compile_context *ctx,
2669 struct tgsi_full_instruction *inst)
2670 {
2671 struct tgsi_dst_register *dst = &inst->Dst[0].Register;
2672 struct tgsi_src_register *src = &inst->Src[0].Register;
2673
2674 /* NOTE: atomic start/end, rather than in create_mov() since
2675 * create_mov() is used already w/in atomic sequences (and
2676 * we aren't clever enough to deal with the nesting)
2677 */
2678 instr_atomic_start(ctx);
2679 create_mov(ctx, dst, src);
2680 instr_atomic_end(ctx);
2681 }
2682
2683 static void
2684 instr_cat2(const struct instr_translater *t,
2685 struct ir3_compile_context *ctx,
2686 struct tgsi_full_instruction *inst)
2687 {
2688 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2689 struct tgsi_src_register *src0 = &inst->Src[0].Register;
2690 struct tgsi_src_register *src1 = &inst->Src[1].Register;
2691 struct ir3_instruction *instr;
2692 unsigned src0_flags = 0, src1_flags = 0;
2693
2694 switch (t->tgsi_opc) {
2695 case TGSI_OPCODE_ABS:
2696 case TGSI_OPCODE_IABS:
2697 src0_flags = IR3_REG_ABS;
2698 break;
2699 case TGSI_OPCODE_INEG:
2700 src0_flags = IR3_REG_NEGATE;
2701 break;
2702 case TGSI_OPCODE_SUB:
2703 src1_flags = IR3_REG_NEGATE;
2704 break;
2705 }
2706
2707 switch (t->opc) {
2708 case OPC_ABSNEG_F:
2709 case OPC_ABSNEG_S:
2710 case OPC_CLZ_B:
2711 case OPC_CLZ_S:
2712 case OPC_SIGN_F:
2713 case OPC_FLOOR_F:
2714 case OPC_CEIL_F:
2715 case OPC_RNDNE_F:
2716 case OPC_RNDAZ_F:
2717 case OPC_TRUNC_F:
2718 case OPC_NOT_B:
2719 case OPC_BFREV_B:
2720 case OPC_SETRM:
2721 case OPC_CBITS_B:
2722 /* these only have one src reg */
2723 instr = instr_create(ctx, 2, t->opc);
2724 vectorize(ctx, instr, dst, 1, src0, src0_flags);
2725 break;
2726 default:
2727 if (is_const(src0) && is_const(src1))
2728 src0 = get_unconst(ctx, src0);
2729
2730 instr = instr_create(ctx, 2, t->opc);
2731 vectorize(ctx, instr, dst, 2, src0, src0_flags,
2732 src1, src1_flags);
2733 break;
2734 }
2735
2736 put_dst(ctx, inst, dst);
2737 }
2738
2739 static void
2740 instr_cat3(const struct instr_translater *t,
2741 struct ir3_compile_context *ctx,
2742 struct tgsi_full_instruction *inst)
2743 {
2744 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2745 struct tgsi_src_register *src0 = &inst->Src[0].Register;
2746 struct tgsi_src_register *src1 = &inst->Src[1].Register;
2747 struct ir3_instruction *instr;
2748
2749 /* in particular, can't handle const for src1 for cat3..
2750 * for mad, we can swap first two src's if needed:
2751 */
2752 if (is_rel_or_const(src1)) {
2753 if (is_mad(t->opc) && !is_rel_or_const(src0)) {
2754 struct tgsi_src_register *tmp;
2755 tmp = src0;
2756 src0 = src1;
2757 src1 = tmp;
2758 } else {
2759 src1 = get_unconst(ctx, src1);
2760 }
2761 }
2762
2763 instr = instr_create(ctx, 3, t->opc);
2764 vectorize(ctx, instr, dst, 3, src0, 0, src1, 0,
2765 &inst->Src[2].Register, 0);
2766 put_dst(ctx, inst, dst);
2767 }
2768
2769 static void
2770 instr_cat4(const struct instr_translater *t,
2771 struct ir3_compile_context *ctx,
2772 struct tgsi_full_instruction *inst)
2773 {
2774 struct tgsi_dst_register *dst = get_dst(ctx, inst);
2775 struct tgsi_src_register *src = &inst->Src[0].Register;
2776 struct ir3_instruction *instr;
2777 unsigned i;
2778
2779 /* seems like blob compiler avoids const as src.. */
2780 if (is_const(src))
2781 src = get_unconst(ctx, src);
2782
2783 /* we need to replicate into each component: */
2784 for (i = 0; i < 4; i++) {
2785 if (dst->WriteMask & (1 << i)) {
2786 instr = instr_create(ctx, 4, t->opc);
2787 add_dst_reg(ctx, instr, dst, i);
2788 add_src_reg(ctx, instr, src, src->SwizzleX);
2789 }
2790 }
2791
2792 put_dst(ctx, inst, dst);
2793 }
2794
2795 static const struct instr_translater translaters[TGSI_OPCODE_LAST] = {
2796 #define INSTR(n, f, ...) \
2797 [TGSI_OPCODE_ ## n] = { .fxn = (f), .tgsi_opc = TGSI_OPCODE_ ## n, ##__VA_ARGS__ }
2798
2799 INSTR(MOV, instr_cat1),
2800 INSTR(RCP, instr_cat4, .opc = OPC_RCP),
2801 INSTR(RSQ, instr_cat4, .opc = OPC_RSQ),
2802 INSTR(SQRT, instr_cat4, .opc = OPC_SQRT),
2803 INSTR(MUL, instr_cat2, .opc = OPC_MUL_F),
2804 INSTR(ADD, instr_cat2, .opc = OPC_ADD_F),
2805 INSTR(SUB, instr_cat2, .opc = OPC_ADD_F),
2806 INSTR(MIN, instr_cat2, .opc = OPC_MIN_F),
2807 INSTR(MAX, instr_cat2, .opc = OPC_MAX_F),
2808 INSTR(UADD, instr_cat2, .opc = OPC_ADD_U),
2809 INSTR(IMIN, instr_cat2, .opc = OPC_MIN_S),
2810 INSTR(UMIN, instr_cat2, .opc = OPC_MIN_U),
2811 INSTR(IMAX, instr_cat2, .opc = OPC_MAX_S),
2812 INSTR(UMAX, instr_cat2, .opc = OPC_MAX_U),
2813 INSTR(AND, instr_cat2, .opc = OPC_AND_B),
2814 INSTR(OR, instr_cat2, .opc = OPC_OR_B),
2815 INSTR(NOT, instr_cat2, .opc = OPC_NOT_B),
2816 INSTR(XOR, instr_cat2, .opc = OPC_XOR_B),
2817 INSTR(UMUL, trans_umul),
2818 INSTR(UMAD, trans_umul),
2819 INSTR(UDIV, trans_idiv),
2820 INSTR(IDIV, trans_idiv),
2821 INSTR(MOD, trans_idiv),
2822 INSTR(UMOD, trans_idiv),
2823 INSTR(SHL, instr_cat2, .opc = OPC_SHL_B),
2824 INSTR(USHR, instr_cat2, .opc = OPC_SHR_B),
2825 INSTR(ISHR, instr_cat2, .opc = OPC_ASHR_B),
2826 INSTR(IABS, instr_cat2, .opc = OPC_ABSNEG_S),
2827 INSTR(INEG, instr_cat2, .opc = OPC_ABSNEG_S),
2829 INSTR(MAD, instr_cat3, .opc = OPC_MAD_F32, .hopc = OPC_MAD_F16),
2830 INSTR(TRUNC, instr_cat2, .opc = OPC_TRUNC_F),
2831 INSTR(CLAMP, trans_clamp),
2832 INSTR(FLR, instr_cat2, .opc = OPC_FLOOR_F),
2833 INSTR(ROUND, instr_cat2, .opc = OPC_RNDNE_F),
2834 INSTR(SSG, instr_cat2, .opc = OPC_SIGN_F),
2835 INSTR(CEIL, instr_cat2, .opc = OPC_CEIL_F),
2836 INSTR(ARL, trans_arl),
2837 INSTR(UARL, trans_arl),
2838 INSTR(EX2, instr_cat4, .opc = OPC_EXP2),
2839 INSTR(LG2, instr_cat4, .opc = OPC_LOG2),
2840 INSTR(ABS, instr_cat2, .opc = OPC_ABSNEG_F),
2841 INSTR(COS, instr_cat4, .opc = OPC_COS),
2842 INSTR(SIN, instr_cat4, .opc = OPC_SIN),
2843 INSTR(TEX, trans_samp, .opc = OPC_SAM),
2844 INSTR(TXP, trans_samp, .opc = OPC_SAM),
2845 INSTR(TXB, trans_samp, .opc = OPC_SAMB),
2846 INSTR(TXB2, trans_samp, .opc = OPC_SAMB),
2847 INSTR(TXL, trans_samp, .opc = OPC_SAML),
2848 INSTR(TXD, trans_samp, .opc = OPC_SAMGQ),
2849 INSTR(TXF, trans_samp, .opc = OPC_ISAML),
2850 INSTR(TXQ, trans_txq),
2851 INSTR(DDX, trans_deriv, .opc = OPC_DSX),
2852 INSTR(DDY, trans_deriv, .opc = OPC_DSY),
2853 INSTR(SGT, trans_cmp),
2854 INSTR(SLT, trans_cmp),
2855 INSTR(FSLT, trans_cmp),
2856 INSTR(SGE, trans_cmp),
2857 INSTR(FSGE, trans_cmp),
2858 INSTR(SLE, trans_cmp),
2859 INSTR(SNE, trans_cmp),
2860 INSTR(FSNE, trans_cmp),
2861 INSTR(SEQ, trans_cmp),
2862 INSTR(FSEQ, trans_cmp),
2863 INSTR(CMP, trans_cmp),
2864 INSTR(USNE, trans_icmp, .opc = OPC_CMPS_U),
2865 INSTR(USEQ, trans_icmp, .opc = OPC_CMPS_U),
2866 INSTR(ISGE, trans_icmp, .opc = OPC_CMPS_S),
2867 INSTR(USGE, trans_icmp, .opc = OPC_CMPS_U),
2868 INSTR(ISLT, trans_icmp, .opc = OPC_CMPS_S),
2869 INSTR(USLT, trans_icmp, .opc = OPC_CMPS_U),
2870 INSTR(UCMP, trans_ucmp),
2871 INSTR(ISSG, trans_issg),
2872 INSTR(IF, trans_if, .opc = OPC_CMPS_F),
2873 INSTR(UIF, trans_if, .opc = OPC_CMPS_U),
2874 INSTR(ELSE, trans_else),
2875 INSTR(ENDIF, trans_endif),
2876 INSTR(END, instr_cat0, .opc = OPC_END),
2877 INSTR(KILL, trans_kill, .opc = OPC_KILL),
2878 INSTR(KILL_IF, trans_killif, .opc = OPC_KILL),
2879 INSTR(I2F, trans_cov),
2880 INSTR(U2F, trans_cov),
2881 INSTR(F2I, trans_cov),
2882 INSTR(F2U, trans_cov),
2883 };
2884
2885 static ir3_semantic
2886 decl_semantic(const struct tgsi_declaration_semantic *sem)
2887 {
2888 return ir3_semantic_name(sem->Name, sem->Index);
2889 }
2890
2891 static struct ir3_instruction *
2892 decl_in_frag_bary(struct ir3_compile_context *ctx, unsigned regid,
2893 unsigned j, unsigned inloc, bool use_ldlv)
2894 {
2895 struct ir3_instruction *instr;
2896 struct ir3_register *src;
2897
2898 if (use_ldlv) {
2899 /* ldlv.u32 dst, l[#inloc], 1 */
2900 instr = instr_create(ctx, 6, OPC_LDLV);
2901 instr->cat6.type = TYPE_U32;
2902 instr->cat6.iim_val = 1;
2903 ir3_reg_create(instr, regid, 0); /* dummy dst */
2904 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = inloc;
2905 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 1;
2906
2907 return instr;
2908 }
2909
2910 /* bary.f dst, #inloc, r0.x */
2911 instr = instr_create(ctx, 2, OPC_BARY_F);
2912 ir3_reg_create(instr, regid, 0); /* dummy dst */
2913 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = inloc;
2914 src = ir3_reg_create(instr, 0, IR3_REG_SSA);
2915 src->wrmask = 0x3;
2916 src->instr = ctx->frag_pos;
2917
2918 return instr;
2919 }
2920
2921 /* TGSI_SEMANTIC_POSITION
2922 * """"""""""""""""""""""
2923 *
2924 * For fragment shaders, TGSI_SEMANTIC_POSITION is used to indicate that
2925 * fragment shader input contains the fragment's window position. The X
2926 * component starts at zero and always increases from left to right.
2927 * The Y component starts at zero and always increases but Y=0 may either
2928 * indicate the top of the window or the bottom depending on the fragment
2929 * coordinate origin convention (see TGSI_PROPERTY_FS_COORD_ORIGIN).
2930 * The Z coordinate ranges from 0 to 1 to represent depth from the front
2931 * to the back of the Z buffer. The W component contains the reciprocal
2932 * of the interpolated vertex position W component.
2933 */
2934 static struct ir3_instruction *
2935 decl_in_frag_coord(struct ir3_compile_context *ctx, unsigned regid,
2936 unsigned j)
2937 {
2938 struct ir3_instruction *instr, *src;
2939
2940 compile_assert(ctx, !ctx->frag_coord[j]);
2941
2942 ctx->frag_coord[j] = create_input(ctx->block, NULL, 0);
2943
2945 switch (j) {
2946 case 0: /* .x */
2947 case 1: /* .y */
2948 /* for frag_coord, we get unsigned values.. we need
2949 * to subtract (integer) 8 and divide by 16 (right-
2950 * shift by 4) then convert to float:
2951 */
2952
2953 /* add.s tmp, src, -8 */
2954 instr = instr_create(ctx, 2, OPC_ADD_S);
2955 ir3_reg_create(instr, regid, 0); /* dummy dst */
2956 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->frag_coord[j];
2957 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = -8;
2958 src = instr;
2959
2960 /* shr.b tmp, tmp, 4 */
2961 instr = instr_create(ctx, 2, OPC_SHR_B);
2962 ir3_reg_create(instr, regid, 0); /* dummy dst */
2963 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2964 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 4;
2965 src = instr;
2966
2967 /* mov.u32f32 dst, tmp */
2968 instr = instr_create(ctx, 1, 0);
2969 instr->cat1.src_type = TYPE_U32;
2970 instr->cat1.dst_type = TYPE_F32;
2971 ir3_reg_create(instr, regid, 0); /* dummy dst */
2972 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
2973
2974 break;
2975 case 2: /* .z */
2976 case 3: /* .w */
2977 /* seems that we can use these as-is: */
2978 instr = ctx->frag_coord[j];
2979 break;
2980 default:
2981 compile_error(ctx, "invalid channel\n");
2982 instr = create_immed(ctx, 0.0);
2983 break;
2984 }
2985
2986 return instr;
2987 }
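
/* i.e. the hw apparently hands us x/y in unsigned fixed point with the
 * half-pixel offset already applied: raw = (x << 4) + 8, so a fragment
 * in the pixel at window x=5 comes in as 88, and (88 - 8) >> 4 = 5.
 */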
2988
2989 /* TGSI_SEMANTIC_FACE
2990 * """"""""""""""""""
2991 *
2992 * This label applies to fragment shader inputs only and indicates that
2993 * the register contains front/back-face information of the form (F, 0,
2994 * 0, 1). The first component will be positive when the fragment belongs
2995 * to a front-facing polygon, and negative when the fragment belongs to a
2996 * back-facing polygon.
2997 */
2998 static struct ir3_instruction *
2999 decl_in_frag_face(struct ir3_compile_context *ctx, unsigned regid,
3000 unsigned j)
3001 {
3002 struct ir3_instruction *instr, *src;
3003
3004 switch (j) {
3005 case 0: /* .x */
3006 compile_assert(ctx, !ctx->frag_face);
3007
3008 ctx->frag_face = create_input(ctx->block, NULL, 0);
3009
3010 /* for faceness, we always get -1 or 0 (int).. but TGSI expects
3011 * positive vs negative float.. and piglit further seems to
3012 * expect -1.0 or 1.0:
3013 *
3014 * mul.s tmp, hr0.x, 2
3015 * add.s tmp, tmp, 1
3016 * mov.s32f32 dst, tmp
3017 *
3018 */
3019
3020 instr = instr_create(ctx, 2, OPC_MUL_S);
3021 ir3_reg_create(instr, regid, 0); /* dummy dst */
3022 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = ctx->frag_face;
3023 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 2;
3024 src = instr;
3025
3026 instr = instr_create(ctx, 2, OPC_ADD_S);
3027 ir3_reg_create(instr, regid, 0); /* dummy dst */
3028 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
3029 ir3_reg_create(instr, 0, IR3_REG_IMMED)->iim_val = 1;
3030 src = instr;
3031
3032 instr = instr_create(ctx, 1, 0); /* mov */
3033 instr->cat1.src_type = TYPE_S32;
3034 instr->cat1.dst_type = TYPE_F32;
3035 ir3_reg_create(instr, regid, 0); /* dummy dst */
3036 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
3037
3038 break;
3039 case 1: /* .y */
3040 case 2: /* .z */
3041 instr = create_immed(ctx, 0.0);
3042 break;
3043 case 3: /* .w */
3044 instr = create_immed(ctx, 1.0);
3045 break;
3046 default:
3047 compile_error(ctx, "invalid channel\n");
3048 instr = create_immed(ctx, 0.0);
3049 break;
3050 }
3051
3052 return instr;
3053 }
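
/* checking the arithmetic: the hw's -1 becomes (-1 * 2) + 1 = -1.0 and
 * its 0 becomes (0 * 2) + 1 = 1.0, giving the positive/negative first
 * component that TGSI_SEMANTIC_FACE requires.
 */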
3054
3055 static void
3056 decl_in(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
3057 {
3058 struct ir3_shader_variant *so = ctx->so;
3059 unsigned name = decl->Semantic.Name;
3060 unsigned i;
3061
3062 /* I don't think we should get frag shader input without
3063 * semantic info? Otherwise how do inputs get linked to
3064 * vert outputs?
3065 */
3066 compile_assert(ctx, (ctx->type == TGSI_PROCESSOR_VERTEX) ||
3067 decl->Declaration.Semantic);
3068
3069 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
3070 unsigned n = so->inputs_count++;
3071 unsigned r = regid(i, 0);
3072 unsigned ncomp, j;
3073
3074 /* we'll figure out the actual components used after scheduling */
3075 ncomp = 4;
3076
3077 DBG("decl in -> r%d", i);
3078
3079 compile_assert(ctx, n < ARRAY_SIZE(so->inputs));
3080
3081 so->inputs[n].semantic = decl_semantic(&decl->Semantic);
3082 so->inputs[n].compmask = (1 << ncomp) - 1;
3083 so->inputs[n].regid = r;
3084 so->inputs[n].inloc = ctx->next_inloc;
3085 so->inputs[n].interpolate = decl->Interp.Interpolate;
3086
3087 for (j = 0; j < ncomp; j++) {
3088 struct ir3_instruction *instr = NULL;
3089
3090 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
3091 /* for fragment shaders, POSITION and FACE are handled
3092 * specially, not using normal varying / bary.f
3093 */
3094 if (name == TGSI_SEMANTIC_POSITION) {
3095 so->inputs[n].bary = false;
3096 so->frag_coord = true;
3097 instr = decl_in_frag_coord(ctx, r + j, j);
3098 } else if (name == TGSI_SEMANTIC_FACE) {
3099 so->inputs[n].bary = false;
3100 so->frag_face = true;
3101 instr = decl_in_frag_face(ctx, r + j, j);
3102 } else {
3103 bool use_ldlv = false;
3104
3105 /* I don't believe it is valid to not have Interp
3106 * on a normal frag shader input, and various parts
3107 * that handle flat/smooth shading make this
3108 * assumption as well.
3109 */
3110 compile_assert(ctx, decl->Declaration.Interpolate);
3111
3112 if (ctx->flat_bypass) {
3113 switch (decl->Interp.Interpolate) {
3114 case TGSI_INTERPOLATE_COLOR:
3115 if (!ctx->so->key.rasterflat)
3116 break;
3117 /* fallthrough */
3118 case TGSI_INTERPOLATE_CONSTANT:
3119 use_ldlv = true;
3120 break;
3121 }
3122 }
3123
3124 so->inputs[n].bary = true;
3125
3126 instr = decl_in_frag_bary(ctx, r + j, j,
3127 so->inputs[n].inloc + j - 8, use_ldlv);
3128 }
3129 } else {
3130 instr = create_input(ctx->block, NULL, (i * 4) + j);
3131 }
3132
3133 ctx->block->inputs[(i * 4) + j] = instr;
3134 }
3135
3136 if (so->inputs[n].bary || (ctx->type == TGSI_PROCESSOR_VERTEX)) {
3137 ctx->next_inloc += ncomp;
3138 so->total_in += ncomp;
3139 }
3140 }
3141 }
3142
3143 static void
3144 decl_sv(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
3145 {
3146 struct ir3_shader_variant *so = ctx->so;
3147 unsigned r = regid(so->inputs_count, 0);
3148 unsigned n = so->inputs_count++;
3149
3150 DBG("decl sv -> r%d", n);
3151
3152 compile_assert(ctx, n < ARRAY_SIZE(so->inputs));
3153 compile_assert(ctx, decl->Range.First < ARRAY_SIZE(ctx->sysval_semantics));
3154
3155 ctx->sysval_semantics[decl->Range.First] = decl->Semantic.Name;
3156 so->inputs[n].semantic = decl_semantic(&decl->Semantic);
3157 so->inputs[n].compmask = 1;
3158 so->inputs[n].regid = r;
3159 so->inputs[n].inloc = ctx->next_inloc;
3160 so->inputs[n].interpolate = false;
3161
3162 struct ir3_instruction *instr = NULL;
3163
3164 switch (decl->Semantic.Name) {
3165 case TGSI_SEMANTIC_VERTEXID_NOBASE:
3166 ctx->vertex_id = instr = create_input(ctx->block, NULL, r);
3167 break;
3168 case TGSI_SEMANTIC_BASEVERTEX:
3169 ctx->basevertex = instr = instr_create(ctx, 1, 0);
3170 instr->cat1.src_type = get_stype(ctx);
3171 instr->cat1.dst_type = get_stype(ctx);
3172 ir3_reg_create(instr, 0, 0);
3173 ir3_reg_create(instr, regid(so->first_driver_param, 0), IR3_REG_CONST);
3174 break;
3175 case TGSI_SEMANTIC_INSTANCEID:
3176 ctx->instance_id = instr = create_input(ctx->block, NULL, r);
3177 break;
3178 default:
3179 compile_error(ctx, "Unknown semantic: %s\n",
3180 tgsi_semantic_names[decl->Semantic.Name]);
3181 }
3182
3183 ctx->block->inputs[r] = instr;
3184 ctx->next_inloc++;
3185 so->total_in++;
3186 }
3187
3188 static void
3189 decl_out(struct ir3_compile_context *ctx, struct tgsi_full_declaration *decl)
3190 {
3191 struct ir3_shader_variant *so = ctx->so;
3192 unsigned comp = 0;
3193 unsigned name = decl->Semantic.Name;
3194 unsigned i;
3195
3196 compile_assert(ctx, decl->Declaration.Semantic);
3197
3198 DBG("decl out[%d] -> r%d", name, decl->Range.First);
3199
3200 if (ctx->type == TGSI_PROCESSOR_VERTEX) {
3201 switch (name) {
3202 case TGSI_SEMANTIC_POSITION:
3203 so->writes_pos = true;
3204 break;
3205 case TGSI_SEMANTIC_PSIZE:
3206 so->writes_psize = true;
3207 break;
3208 case TGSI_SEMANTIC_COLOR:
3209 case TGSI_SEMANTIC_BCOLOR:
3210 case TGSI_SEMANTIC_GENERIC:
3211 case TGSI_SEMANTIC_FOG:
3212 case TGSI_SEMANTIC_TEXCOORD:
3213 break;
3214 default:
3215 compile_error(ctx, "unknown VS semantic name: %s\n",
3216 tgsi_semantic_names[name]);
3217 }
3218 } else {
3219 switch (name) {
3220 case TGSI_SEMANTIC_POSITION:
3221 comp = 2; /* tgsi will write to .z component */
3222 so->writes_pos = true;
3223 break;
3224 case TGSI_SEMANTIC_COLOR:
3225 break;
3226 default:
3227 compile_error(ctx, "unknown FS semantic name: %s\n",
3228 tgsi_semantic_names[name]);
3229 }
3230 }
3231
3232 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
3233 unsigned n = so->outputs_count++;
3234 unsigned ncomp, j;
3235
3236 ncomp = 4;
3237
3238 compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
3239
3240 so->outputs[n].semantic = decl_semantic(&decl->Semantic);
3241 so->outputs[n].regid = regid(i, comp);
3242
3243 /* avoid undefined outputs, stick a dummy mov from imm{0.0},
3244 * which if the output is actually assigned will be over-
3245 * written
3246 */
3247 for (j = 0; j < ncomp; j++)
3248 ctx->block->outputs[(i * 4) + j] = create_immed(ctx, 0.0);
3249 }
3250 }
3251
3252 /* from TGSI perspective, we actually have inputs. But most of the "inputs"
3253 * for a fragment shader are just bary.f instructions. The *actual* inputs
3254 * from the hw perspective are the frag_pos and optionally frag_coord and
3255 * frag_face.
3256 */
3257 static void
3258 fixup_frag_inputs(struct ir3_compile_context *ctx)
3259 {
3260 struct ir3_shader_variant *so = ctx->so;
3261 struct ir3_block *block = ctx->block;
3262 struct ir3_instruction **inputs;
3263 struct ir3_instruction *instr;
3264 int n, regid = 0;
3265
3266 block->ninputs = 0;
3267
3268 n = 4; /* always have frag_pos */
3269 n += COND(so->frag_face, 4);
3270 n += COND(so->frag_coord, 4);
3271
3272 inputs = ir3_alloc(ctx->ir, n * (sizeof(struct ir3_instruction *)));
3273
3274 if (so->frag_face) {
3275 /* this ultimately gets assigned to hr0.x so doesn't conflict
3276 * with frag_coord/frag_pos..
3277 */
3278 inputs[block->ninputs++] = ctx->frag_face;
3279 ctx->frag_face->regs[0]->num = 0;
3280
3281 /* remaining channels not used, but let's avoid confusing
3282 * other parts that expect inputs to come in groups of vec4
3283 */
3284 inputs[block->ninputs++] = NULL;
3285 inputs[block->ninputs++] = NULL;
3286 inputs[block->ninputs++] = NULL;
3287 }
3288
3289 /* since we don't know where to set the regid for frag_coord,
3290 * we have to use r0.x for it. But we don't want to *always*
3291 * use r1.x for frag_pos as that could increase the register
3292 * footprint on simple shaders:
3293 */
3294 if (so->frag_coord) {
3295 ctx->frag_coord[0]->regs[0]->num = regid++;
3296 ctx->frag_coord[1]->regs[0]->num = regid++;
3297 ctx->frag_coord[2]->regs[0]->num = regid++;
3298 ctx->frag_coord[3]->regs[0]->num = regid++;
3299
3300 inputs[block->ninputs++] = ctx->frag_coord[0];
3301 inputs[block->ninputs++] = ctx->frag_coord[1];
3302 inputs[block->ninputs++] = ctx->frag_coord[2];
3303 inputs[block->ninputs++] = ctx->frag_coord[3];
3304 }
3305
3306 /* we always have frag_pos: */
3307 so->pos_regid = regid;
3308
3309 /* r0.x */
3310 instr = create_input(block, NULL, block->ninputs);
3311 instr->regs[0]->num = regid++;
3312 inputs[block->ninputs++] = instr;
3313 ctx->frag_pos->regs[1]->instr = instr;
3314
3315 /* r0.y */
3316 instr = create_input(block, NULL, block->ninputs);
3317 instr->regs[0]->num = regid++;
3318 inputs[block->ninputs++] = instr;
3319 ctx->frag_pos->regs[2]->instr = instr;
3320
3321 block->inputs = inputs;
3322 }
3323
3324 static void
3325 compile_instructions(struct ir3_compile_context *ctx)
3326 {
3327 push_block(ctx);
3328
3329 /* for fragment shader, we have a single input register (usually
3330 * r0.xy) which is used as the base for bary.f varying fetch instrs:
3331 */
3332 if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
3333 struct ir3_instruction *instr;
3334 instr = ir3_instr_create(ctx->block, -1, OPC_META_FI);
3335 ir3_reg_create(instr, 0, 0);
3336 ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.x */
3337 ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.y */
3338 ctx->frag_pos = instr;
3339 }
3340
3341 while (!tgsi_parse_end_of_tokens(&ctx->parser)) {
3342 tgsi_parse_token(&ctx->parser);
3343
3344 switch (ctx->parser.FullToken.Token.Type) {
3345 case TGSI_TOKEN_TYPE_DECLARATION: {
3346 struct tgsi_full_declaration *decl =
3347 &ctx->parser.FullToken.FullDeclaration;
3348 unsigned file = decl->Declaration.File;
3349 if (file == TGSI_FILE_OUTPUT) {
3350 decl_out(ctx, decl);
3351 } else if (file == TGSI_FILE_INPUT) {
3352 decl_in(ctx, decl);
3353 } else if (file == TGSI_FILE_SYSTEM_VALUE) {
3354 decl_sv(ctx, decl);
3355 }
3356
3357 if ((file != TGSI_FILE_CONSTANT) && decl->Declaration.Array) {
3358 int aid = decl->Array.ArrayID + ctx->array_offsets[file];
3359
3360 compile_assert(ctx, aid < ARRAY_SIZE(ctx->array));
3361
3362 /* legacy ArrayID==0 stuff probably isn't going to work
3363 * well (and is at least untested).. let's just scream:
3364 */
3365 compile_assert(ctx, aid != 0);
3366
3367 ctx->array[aid].first = decl->Range.First;
3368 ctx->array[aid].last = decl->Range.Last;
3369 }
3370 break;
3371 }
3372 case TGSI_TOKEN_TYPE_IMMEDIATE: {
3373 /* TODO: if we know the immediate is small enough, and only
3374 * used with instructions that can embed an immediate, we
3375 * can skip this:
3376 */
3377 struct tgsi_full_immediate *imm =
3378 &ctx->parser.FullToken.FullImmediate;
3379 unsigned n = ctx->so->immediates_count++;
3380 compile_assert(ctx, n < ARRAY_SIZE(ctx->so->immediates));
3381 memcpy(ctx->so->immediates[n].val, imm->u, 16);
3382 break;
3383 }
3384 case TGSI_TOKEN_TYPE_INSTRUCTION: {
3385 struct tgsi_full_instruction *inst =
3386 &ctx->parser.FullToken.FullInstruction;
3387 unsigned opc = inst->Instruction.Opcode;
3388 const struct instr_translater *t = &translaters[opc];
3389
3390 if (t->fxn) {
3391 t->fxn(t, ctx, inst);
3392 ctx->num_internal_temps = 0;
3393
3394 compile_assert(ctx, !ctx->using_tmp_dst);
3395 } else {
3396 compile_error(ctx, "unknown TGSI opc: %s\n",
3397 tgsi_get_opcode_name(opc));
3398 }
3399
3400 switch (inst->Instruction.Saturate) {
3401 case TGSI_SAT_ZERO_ONE:
3402 create_clamp_imm(ctx, &inst->Dst[0].Register,
3403 fui(0.0), fui(1.0));
3404 break;
3405 case TGSI_SAT_MINUS_PLUS_ONE:
3406 create_clamp_imm(ctx, &inst->Dst[0].Register,
3407 fui(-1.0), fui(1.0));
3408 break;
3409 }
3410
3411 instr_finish(ctx);
3412
3413 break;
3414 }
3415 case TGSI_TOKEN_TYPE_PROPERTY: {
3416 struct tgsi_full_property *prop =
3417 &ctx->parser.FullToken.FullProperty;
3418 switch (prop->Property.PropertyName) {
3419 case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
3420 ctx->so->color0_mrt = !!prop->u[0].Data;
3421 break;
3422 }
3423 break;
3424 }
3424 default:
3425 break;
3426 }
3427 }
3428 }
3429
3430 static void
3431 compile_dump(struct ir3_compile_context *ctx)
3432 {
3433 const char *name = (ctx->so->type == SHADER_VERTEX) ? "vert" : "frag";
3434 static unsigned n = 0;
3435 char fname[16];
3436 FILE *f;
3437 snprintf(fname, sizeof(fname), "%s-%04u.dot", name, n++);
3438 f = fopen(fname, "w");
3439 if (!f)
3440 return;
3441 ir3_block_depth(ctx->block);
3442 ir3_dump(ctx->ir, name, ctx->block, f);
3443 fclose(f);
3444 }
3445
3446 int
3447 ir3_compile_shader(struct ir3_shader_variant *so,
3448 const struct tgsi_token *tokens, struct ir3_shader_key key,
3449 bool cp)
3450 {
3451 struct ir3_compile_context ctx;
3452 struct ir3_block *block;
3453 struct ir3_instruction **inputs;
3454 unsigned i, j, actual_in;
3455 int ret = 0, max_bary;
3456
3457 assert(!so->ir);
3458
3459 so->ir = ir3_create();
3460
3461 assert(so->ir);
3462
3463 if (compile_init(&ctx, so, tokens) != TGSI_PARSE_OK) {
3464 DBG("INIT failed!");
3465 ret = -1;
3466 goto out;
3467 }
3468
3469 /* for now, until the edge cases are worked out: */
3470 if (ctx.info.indirect_files_written & (FM(TEMPORARY) | FM(INPUT) | FM(OUTPUT)))
3471 cp = false;
3472
3473 compile_instructions(&ctx);
3474
3475 block = ctx.block;
3476 so->ir->block = block;
3477
3478 /* keep track of the inputs from TGSI perspective.. */
3479 inputs = block->inputs;
3480
3481 /* but fixup actual inputs for frag shader: */
3482 if (ctx.type == TGSI_PROCESSOR_FRAGMENT)
3483 fixup_frag_inputs(&ctx);
3484
3485 /* at this point, for binning pass, throw away unneeded outputs: */
3486 if (key.binning_pass) {
3487 for (i = 0, j = 0; i < so->outputs_count; i++) {
3488 unsigned name = sem2name(so->outputs[i].semantic);
3489 unsigned idx = sem2idx(so->outputs[i].semantic);
3490
3491 /* throw away everything but first position/psize */
3492 if ((idx == 0) && ((name == TGSI_SEMANTIC_POSITION) ||
3493 (name == TGSI_SEMANTIC_PSIZE))) {
3494 if (i != j) {
3495 so->outputs[j] = so->outputs[i];
3496 block->outputs[(j*4)+0] = block->outputs[(i*4)+0];
3497 block->outputs[(j*4)+1] = block->outputs[(i*4)+1];
3498 block->outputs[(j*4)+2] = block->outputs[(i*4)+2];
3499 block->outputs[(j*4)+3] = block->outputs[(i*4)+3];
3500 }
3501 j++;
3502 }
3503 }
3504 so->outputs_count = j;
3505 block->noutputs = j * 4;
3506 }
3507
3508 /* if we want half-precision outputs, mark the output registers
3509 * as half:
3510 */
3511 if (key.half_precision) {
3512 for (i = 0; i < block->noutputs; i++) {
3513 if (!block->outputs[i])
3514 continue;
3515 block->outputs[i]->regs[0]->flags |= IR3_REG_HALF;
3516 }
3517 }
3518
3519 /* at this point, we want the kill's in the outputs array too,
3520 * so that they get scheduled (since they have no dst).. we've
3521 * already ensured that the array is big enough in push_block():
3522 */
3523 if (ctx.type == TGSI_PROCESSOR_FRAGMENT) {
3524 for (i = 0; i < ctx.kill_count; i++)
3525 block->outputs[block->noutputs++] = ctx.kill[i];
3526 }
3527
3528 if (fd_mesa_debug & FD_DBG_OPTDUMP)
3529 compile_dump(&ctx);
3530
3531 ret = ir3_block_flatten(block);
3532 if (ret < 0) {
3533 DBG("FLATTEN failed!");
3534 goto out;
3535 }
3536 if ((ret > 0) && (fd_mesa_debug & FD_DBG_OPTDUMP))
3537 compile_dump(&ctx);
3538
3539 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3540 printf("BEFORE CP:\n");
3541 ir3_dump_instr_list(block->head);
3542 }
3543
3544 ir3_block_depth(block);
3545
3546 /* First remove all the extra mov's (which we could skip if the
3547 * front-end was clever enough not to insert them in the first
3548 * place). Then figure out left/right neighbors, re-inserting
3549 * extra mov's when needed to avoid conflicts.
3550 */
3551 if (cp && !(fd_mesa_debug & FD_DBG_NOCP))
3552 ir3_block_cp(block);
3553
3554 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3555 printf("BEFORE GROUPING:\n");
3556 ir3_dump_instr_list(block->head);
3557 }
3558
3559 /* Group left/right neighbors, inserting mov's where needed to
3560 * solve conflicts:
3561 */
3562 ir3_block_group(block);
3563
3564 if (fd_mesa_debug & FD_DBG_OPTDUMP)
3565 compile_dump(&ctx);
3566
3567 ir3_block_depth(block);
3568
3569 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3570 printf("AFTER DEPTH:\n");
3571 ir3_dump_instr_list(block->head);
3572 }
3573
3574 ret = ir3_block_sched(block);
3575 if (ret) {
3576 DBG("SCHED failed!");
3577 goto out;
3578 }
3579
3580 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3581 printf("AFTER SCHED:\n");
3582 ir3_dump_instr_list(block->head);
3583 }
3584
3585 ret = ir3_block_ra(block, so->type, so->frag_coord, so->frag_face);
3586 if (ret) {
3587 DBG("RA failed!");
3588 goto out;
3589 }
3590
3591 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
3592 printf("AFTER RA:\n");
3593 ir3_dump_instr_list(block->head);
3594 }
3595
3596 ir3_block_legalize(block, &so->has_samp, &max_bary);
3597
3598 /* fixup input/outputs: */
3599 for (i = 0; i < so->outputs_count; i++) {
3600 so->outputs[i].regid = block->outputs[i*4]->regs[0]->num;
3601 /* preserve hack for depth output.. tgsi writes depth to .z,
3602 * but what we give the hw is the scalar register:
3603 */
3604 if ((ctx.type == TGSI_PROCESSOR_FRAGMENT) &&
3605 (sem2name(so->outputs[i].semantic) == TGSI_SEMANTIC_POSITION))
3606 so->outputs[i].regid += 2;
3607 }
3608 /* Note that some or all channels of an input may be unused: */
3609 actual_in = 0;
3610 for (i = 0; i < so->inputs_count; i++) {
3611 unsigned j, regid = ~0, compmask = 0;
3612 so->inputs[i].ncomp = 0;
3613 for (j = 0; j < 4; j++) {
3614 struct ir3_instruction *in = inputs[(i*4) + j];
3615 if (in) {
3616 compmask |= (1 << j);
3617 regid = in->regs[0]->num - j;
3618 actual_in++;
3619 so->inputs[i].ncomp++;
3620 }
3621 }
3622 so->inputs[i].regid = regid;
3623 so->inputs[i].compmask = compmask;
3624 }
3625
3626 /* fragment shader always gets full vec4's even if it doesn't
3627 * fetch all components, but for vertex shaders we need to update
3628 * with the actual number of components fetched, otherwise things
3629 * will hang due to a mismatch between VFD_DECODE's and
3630 * TOTALATTRTOVS
3631 */
3632 if (so->type == SHADER_VERTEX)
3633 so->total_in = actual_in;
3634 else
3635 so->total_in = align(max_bary + 1, 4);
3636
3637 out:
3638 if (ret) {
3639 ir3_destroy(so->ir);
3640 so->ir = NULL;
3641 }
3642 compile_free(&ctx);
3643
3644 return ret;
3645 }