freedreno/ir3: encode instruction category in opc_t
[mesa.git] / src/gallium/drivers/freedreno/ir3/ir3.h
1 /*
2 * Copyright (c) 2013 Rob Clark <robdclark@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #ifndef IR3_H_
25 #define IR3_H_
26
27 #include <stdint.h>
28 #include <stdbool.h>
29
30 #include "util/u_debug.h"
31 #include "util/list.h"
32
33 #include "instr-a3xx.h"
34 #include "disasm.h" /* TODO move 'enum shader_t' somewhere else.. */
35
36 /* low level intermediate representation of an adreno shader program */
37
38 struct ir3_compiler;
39 struct ir3;
40 struct ir3_instruction;
41 struct ir3_block;
42
43 struct ir3_info {
44 uint32_t gpu_id;
45 uint16_t sizedwords;
46 uint16_t instrs_count; /* expanded to account for rpt's */
47 /* NOTE: max_reg, etc, do not include registers that the shader
48 * itself never touches (e.g. a vertex attribute fetched via
49 * VFD_DECODE but never read by the shader)
50 */
51 int8_t max_reg; /* highest GPR # used by shader */
52 int8_t max_half_reg;
53 int16_t max_const;
54 };
55
56 struct ir3_register {
57 enum {
58 IR3_REG_CONST = 0x001,
59 IR3_REG_IMMED = 0x002,
60 IR3_REG_HALF = 0x004,
61 IR3_REG_RELATIV= 0x008,
62 IR3_REG_R = 0x010,
63 /* Most instructions, it seems, can do float abs/neg but not
64 * integer. The CP pass needs to know what is intended (int or
65 * float) in order to do the right thing. For this reason the
66 * abs/neg flags are split out into float and int variants. In
67 * addition, for .b (bitwise) operations the negate is actually
68 * a bitwise not, so that is split out into a separate flag to
69 * make it clearer.
70 */
71 IR3_REG_FNEG = 0x020,
72 IR3_REG_FABS = 0x040,
73 IR3_REG_SNEG = 0x080,
74 IR3_REG_SABS = 0x100,
75 IR3_REG_BNOT = 0x200,
76 IR3_REG_EVEN = 0x400,
77 IR3_REG_POS_INF= 0x800,
78 /* (ei) flag, end-input? Set on last bary, presumably to signal
79 * that the shader needs no more input:
80 */
81 IR3_REG_EI = 0x1000,
82 /* meta-flags, for intermediate stages of IR, ie.
83 * before register assignment is done:
84 */
85 IR3_REG_SSA = 0x2000, /* 'instr' is ptr to assigning instr */
86 IR3_REG_ARRAY = 0x4000,
87 IR3_REG_PHI_SRC= 0x8000, /* phi src, regs[0]->instr points to phi */
88
89 } flags;
90 union {
91 /* normal registers:
92 * the component is in the low two bits of the reg #, so
93 * rN.x becomes: (N << 2) | x
94 */
95 int num;
96 /* immediate: */
97 int32_t iim_val;
98 uint32_t uim_val;
99 float fim_val;
100 /* relative: */
101 struct {
102 uint16_t id;
103 int16_t offset;
104 } array;
105 };
106
107 /* For IR3_REG_SSA, src registers contain ptr back to assigning
108 * instruction.
109 *
110 * For IR3_REG_ARRAY, the pointer is back to the last dependent
111 * array access (although the net effect is the same, it points
112 * back to a previous instruction that we depend on).
113 */
114 struct ir3_instruction *instr;
115
116 union {
117 /* used for cat5 instructions, but also for internal/IR level
118 * tracking of what registers are read/written by an instruction.
119 * wrmask may be a bad name since it is used to represent both
120 * src and dst that touch multiple adjacent registers.
121 */
122 unsigned wrmask;
123 /* for relative addressing, a 32-bit wrmask is too small, but
124 * otoh we don't need to deal with disjoint sets, so instead
125 * use a simple size field (number of scalar components).
126 */
127 unsigned size;
128 };
129 };
130
131 struct ir3_instruction {
132 struct ir3_block *block;
133 int category;
134 opc_t opc;
135 enum {
136 /* (sy) flag is set on the first instruction, and after sample
137 * instructions (probably just on a RAW hazard).
138 */
139 IR3_INSTR_SY = 0x001,
140 /* (ss) flag is set on the first instruction, and on the first
141 * instruction to depend on the result of "long" instructions (RAW hazard):
142 *
143 * rcp, rsq, log2, exp2, sin, cos, sqrt
144 *
145 * It seems to synchronize until all in-flight instructions are
146 * completed, for example:
147 *
148 * rsq hr1.w, hr1.w
149 * add.f hr2.z, (neg)hr2.z, hc0.y
150 * mul.f hr2.w, (neg)hr2.y, (neg)hr2.y
151 * rsq hr2.x, hr2.x
152 * (rpt1)nop
153 * mad.f16 hr2.w, hr2.z, hr2.z, hr2.w
154 * nop
155 * mad.f16 hr2.w, (neg)hr0.w, (neg)hr0.w, hr2.w
156 * (ss)(rpt2)mul.f hr1.x, (r)hr1.x, hr1.w
157 * (rpt2)mul.f hr0.x, (neg)(r)hr0.x, hr2.x
158 *
159 * The last mul.f does not have (ss) set, presumably because the
160 * (ss) on the previous instruction does the job.
161 *
162 * The blob driver also seems to set it on WAR hazards, although
163 * it is not clear whether this is needed or whether the blob
164 * compiler is just being sloppy. So far I haven't found a case
165 * where removing the (ss) causes problems for a WAR hazard, but
166 * I could just be getting lucky:
167 *
168 * rcp r1.y, r3.y
169 * (ss)(rpt2)mad.f32 r3.y, (r)c9.x, r1.x, (r)r3.z
170 *
171 */
172 IR3_INSTR_SS = 0x002,
173 /* (jp) flag is set on jump targets:
174 */
175 IR3_INSTR_JP = 0x004,
176 IR3_INSTR_UL = 0x008,
177 IR3_INSTR_3D = 0x010,
178 IR3_INSTR_A = 0x020,
179 IR3_INSTR_O = 0x040,
180 IR3_INSTR_P = 0x080,
181 IR3_INSTR_S = 0x100,
182 IR3_INSTR_S2EN = 0x200,
183 IR3_INSTR_G = 0x400,
184 /* meta-flags, for intermediate stages of IR, ie.
185 * before register assignment is done:
186 */
187 IR3_INSTR_MARK = 0x1000,
188 IR3_INSTR_UNUSED= 0x2000,
189 } flags;
190 int repeat;
191 #ifdef DEBUG
192 unsigned regs_max;
193 #endif
194 unsigned regs_count;
195 struct ir3_register **regs;
196 union {
197 struct {
198 char inv;
199 char comp;
200 int immed;
201 struct ir3_block *target;
202 } cat0;
203 struct {
204 type_t src_type, dst_type;
205 } cat1;
206 struct {
207 enum {
208 IR3_COND_LT = 0,
209 IR3_COND_LE = 1,
210 IR3_COND_GT = 2,
211 IR3_COND_GE = 3,
212 IR3_COND_EQ = 4,
213 IR3_COND_NE = 5,
214 } condition;
215 } cat2;
216 struct {
217 unsigned samp, tex;
218 type_t type;
219 } cat5;
220 struct {
221 type_t type;
222 int src_offset;
223 int dst_offset;
224 int iim_val;
225 } cat6;
226 /* for meta-instructions, just used to hold extra data
227 * before instruction scheduling, etc
228 */
229 struct {
230 int off; /* component/offset */
231 } fo;
232 struct {
233 /* used to temporarily hold reference to nir_phi_instr
234 * until we resolve the phi srcs
235 */
236 void *nphi;
237 } phi;
238 struct {
239 struct ir3_block *block;
240 } inout;
241 };
242
243 /* transient values used during various algorithms: */
244 union {
245 /* The instruction depth is the max dependency distance to output.
246 *
247 * You can also think of it as the "cost", if we did any sort of
248 * optimization for register footprint. Ie. a value that is just
249 * the result of moving a const to a reg would have a low cost, so
250 * it could make sense to duplicate the instruction at various
251 * points where the result is needed, to reduce register footprint.
252 */
253 unsigned depth;
254 /* When we get to the RA stage, we no longer need depth, but
255 * we do need the instruction's position/name:
256 */
257 struct {
258 uint16_t ip;
259 uint16_t name;
260 };
261 };
262
263 /* used for per-pass extra instruction data.
264 */
265 void *data;
266
267 /* Used during CP and RA stages. For fanin and shader inputs/
268 * outputs where we need a sequence of consecutive registers,
269 * keep track of each src instruction's left (ie 'n-1') and right
270 * (ie 'n+1') neighbor. The front-end must insert enough mov's
271 * to ensure that each instruction has at most one left and at
272 * most one right neighbor. During the copy-propagation pass,
273 * we only remove mov's when we can preserve this constraint.
274 * And during the RA stage, we use the neighbor information to
275 * allocate a block of registers in one shot.
276 *
277 * TODO: maybe just add something like:
278 * struct ir3_instruction_ref {
279 * struct ir3_instruction *instr;
280 * unsigned cnt;
281 * }
282 *
283 * Or can we get away without the refcnt stuff? It seems like
284 * it should be overkill.. the problem arises if, potentially
285 * after already eliminating some mov's, you have a single mov
286 * that needs to be grouped with its neighbors in two different
287 * places (ex. shader output and a fanin).
288 */
289 struct {
290 struct ir3_instruction *left, *right;
291 uint16_t left_cnt, right_cnt;
292 } cp;
293
294 /* an instruction can reference at most one address register amongst
295 * its src/dst registers. Beyond that, you need to insert mov's.
296 *
297 * NOTE: do not write this directly, use ir3_instr_set_address()
298 */
299 struct ir3_instruction *address;
300
301 /* Entry in ir3_block's instruction list: */
302 struct list_head node;
303
304 #ifdef DEBUG
305 uint32_t serialno;
306 #endif
307 };
308
309 static inline struct ir3_instruction *
310 ir3_neighbor_first(struct ir3_instruction *instr)
311 {
312 while (instr->cp.left)
313 instr = instr->cp.left;
314 return instr;
315 }
316
317 static inline int ir3_neighbor_count(struct ir3_instruction *instr)
318 {
319 int num = 1;
320
321 debug_assert(!instr->cp.left);
322
323 while (instr->cp.right) {
324 num++;
325 instr = instr->cp.right;
326 }
327
328 return num;
329 }
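/* Illustrative sketch (not from the original header): walking a neighbor
 * group left-to-right, assuming 'instr' is any member of the group:
 *
 *    for (struct ir3_instruction *n = ir3_neighbor_first(instr); n; n = n->cp.right) {
 *       // visit each instruction of the consecutive-register group in order
 *    }
 */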
330
331 struct ir3_heap_chunk;
332
333 struct ir3 {
334 struct ir3_compiler *compiler;
335
336 unsigned ninputs, noutputs;
337 struct ir3_instruction **inputs;
338 struct ir3_instruction **outputs;
339
340 /* Track bary.f (and ldlv) instructions.. this is needed in
341 * scheduling to ensure that all varying fetches happen before
342 * any potential kill instructions. The hw gets grumpy if all
343 * threads in a group are killed before the last bary.f gets
344 * a chance to signal end of input (ei).
345 */
346 unsigned baryfs_count, baryfs_sz;
347 struct ir3_instruction **baryfs;
348
349 /* Track all indirect instructions (read and write). To avoid a
350 * deadlock scenario where an address register gets scheduled,
351 * but other dependent src instructions cannot be scheduled due
352 * to dependency on a *different* address register value, the
353 * scheduler needs to ensure that all of an instruction's
354 * dependencies, other than the address register itself, are
355 * scheduled before the one that writes the address register.
356 * Having a convenient list of instructions that reference some
357 * address register simplifies this.
358 */
359 unsigned indirects_count, indirects_sz;
360 struct ir3_instruction **indirects;
361 /* and same for instructions that consume predicate register: */
362 unsigned predicates_count, predicates_sz;
363 struct ir3_instruction **predicates;
364
365 /* Track instructions which do not write a register but
366 * otherwise must not be discarded (such as kill, stg, etc)
367 */
368 unsigned keeps_count, keeps_sz;
369 struct ir3_instruction **keeps;
370
371 /* List of blocks: */
372 struct list_head block_list;
373
374 /* List of ir3_array's: */
375 struct list_head array_list;
376
377 unsigned heap_idx;
378 struct ir3_heap_chunk *chunk;
379 };
380
381 typedef struct nir_variable nir_variable;
382
383 struct ir3_array {
384 struct list_head node;
385 unsigned length;
386 unsigned id;
387
388 nir_variable *var;
389
390 /* We track the last write and last access (read or write) to
391 * set up dependencies on instructions that read or write the
392 * array. Reads can be re-ordered wrt. other reads, but should
393 * not be re-ordered wrt. writes. Writes cannot be reordered
394 * wrt. any other access to the array.
395 *
396 * So array reads depend on last write, and array writes depend
397 * on the last access.
398 */
399 struct ir3_instruction *last_write, *last_access;
400
401 /* extra stuff used in RA pass: */
402 unsigned base; /* base vreg name */
403 unsigned reg; /* base physical reg */
404 uint16_t start_ip, end_ip;
405 };
406
407 struct ir3_array * ir3_lookup_array(struct ir3 *ir, unsigned id);
408
409 typedef struct nir_block nir_block;
410
411 struct ir3_block {
412 struct list_head node;
413 struct ir3 *shader;
414
415 nir_block *nblock;
416
417 struct list_head instr_list; /* list of ir3_instruction */
418
419 /* each block has either one or two successors.. in case of
420 * two successors, 'condition' decides which one to follow.
421 * A block preceding an if/else has two successors.
422 */
423 struct ir3_instruction *condition;
424 struct ir3_block *successors[2];
425
426 uint16_t start_ip, end_ip;
427
428 /* used for per-pass extra block data. Mainly used right
429 * now in RA step to track livein/liveout.
430 */
431 void *data;
432
433 #ifdef DEBUG
434 uint32_t serialno;
435 #endif
436 };
437
438 struct ir3 * ir3_create(struct ir3_compiler *compiler,
439 unsigned nin, unsigned nout);
440 void ir3_destroy(struct ir3 *shader);
441 void * ir3_assemble(struct ir3 *shader,
442 struct ir3_info *info, uint32_t gpu_id);
443 void * ir3_alloc(struct ir3 *shader, int sz);
444
445 struct ir3_block * ir3_block_create(struct ir3 *shader);
446
447 struct ir3_instruction * ir3_instr_create(struct ir3_block *block,
448 int category, opc_t opc);
449 struct ir3_instruction * ir3_instr_create2(struct ir3_block *block,
450 int category, opc_t opc, int nreg);
451 struct ir3_instruction * ir3_instr_clone(struct ir3_instruction *instr);
452 const char *ir3_instr_name(struct ir3_instruction *instr);
453
454 struct ir3_register * ir3_reg_create(struct ir3_instruction *instr,
455 int num, int flags);
456 struct ir3_register * ir3_reg_clone(struct ir3 *shader,
457 struct ir3_register *reg);
458
459 void ir3_instr_set_address(struct ir3_instruction *instr,
460 struct ir3_instruction *addr);
461
462 static inline bool ir3_instr_check_mark(struct ir3_instruction *instr)
463 {
464 if (instr->flags & IR3_INSTR_MARK)
465 return true; /* already visited */
466 instr->flags |= IR3_INSTR_MARK;
467 return false;
468 }
469
470 void ir3_block_clear_mark(struct ir3_block *block);
471 void ir3_clear_mark(struct ir3 *shader);
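/* Illustrative sketch (not from the original header): the usual visited-mark
 * pattern for a pass, assuming 'ir' and 'instr' are in scope:
 *
 *    ir3_clear_mark(ir);
 *    ...
 *    if (ir3_instr_check_mark(instr))
 *       return;   // already visited, don't process this instruction twice
 */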
472
473 unsigned ir3_count_instructions(struct ir3 *ir);
474
475 static inline int ir3_instr_regno(struct ir3_instruction *instr,
476 struct ir3_register *reg)
477 {
478 unsigned i;
479 for (i = 0; i < instr->regs_count; i++)
480 if (reg == instr->regs[i])
481 return i;
482 return -1;
483 }
484
485
486 #define MAX_ARRAYS 16
487
488 /* comp:
489 * 0 - x
490 * 1 - y
491 * 2 - z
492 * 3 - w
493 */
494 static inline uint32_t regid(int num, int comp)
495 {
496 return (num << 2) | (comp & 0x3);
497 }
498
499 static inline uint32_t reg_num(struct ir3_register *reg)
500 {
501 return reg->num >> 2;
502 }
503
504 static inline uint32_t reg_comp(struct ir3_register *reg)
505 {
506 return reg->num & 0x3;
507 }
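/* Illustrative example (not from the original header) of the encoding above:
 * regid(2, 1) == (2 << 2) | 1 == 9, i.e. r2.y; for a register with num == 9,
 * reg_num() recovers 2 and reg_comp() recovers 1.
 */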
508
509 static inline bool is_flow(struct ir3_instruction *instr)
510 {
511 return (instr->category == 0);
512 }
513
514 static inline bool is_kill(struct ir3_instruction *instr)
515 {
516 return is_flow(instr) && (instr->opc == OPC_KILL);
517 }
518
519 static inline bool is_nop(struct ir3_instruction *instr)
520 {
521 return is_flow(instr) && (instr->opc == OPC_NOP);
522 }
523
524 /* Is it a non-transformative (ie. not type changing) mov? This can
525 * also include absneg.s/absneg.f, which for the most part can be
526 * treated as a mov (single src argument).
527 */
528 static inline bool is_same_type_mov(struct ir3_instruction *instr)
529 {
530 struct ir3_register *dst = instr->regs[0];
531
532 /* mov's that write to a0.x or p0.x are special: */
533 if (dst->num == regid(REG_P0, 0))
534 return false;
535 if (dst->num == regid(REG_A0, 0))
536 return false;
537
538 if (dst->flags & (IR3_REG_RELATIV | IR3_REG_ARRAY))
539 return false;
540
541 if ((instr->category == 1) &&
542 (instr->cat1.src_type == instr->cat1.dst_type))
543 return true;
544 if ((instr->category == 2) && ((instr->opc == OPC_ABSNEG_F) ||
545 (instr->opc == OPC_ABSNEG_S)))
546 return true;
547 return false;
548 }
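/* Illustrative example (not from the original header): a mov built with
 * ir3_MOV(block, src, TYPE_F32) below has src_type == dst_type and counts as
 * a same-type mov, whereas ir3_COV(block, src, TYPE_F32, TYPE_U32) does not.
 */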
549
550 static inline bool is_alu(struct ir3_instruction *instr)
551 {
552 return (1 <= instr->category) && (instr->category <= 3);
553 }
554
555 static inline bool is_sfu(struct ir3_instruction *instr)
556 {
557 return (instr->category == 4);
558 }
559
560 static inline bool is_tex(struct ir3_instruction *instr)
561 {
562 return (instr->category == 5);
563 }
564
565 static inline bool is_mem(struct ir3_instruction *instr)
566 {
567 return (instr->category == 6);
568 }
569
570 static inline bool
571 is_store(struct ir3_instruction *instr)
572 {
573 if (is_mem(instr)) {
574 /* for these instructions, the "destination" register is
575 * actually a source, the address to store to.
576 */
577 switch (instr->opc) {
578 case OPC_STG:
579 case OPC_STP:
580 case OPC_STL:
581 case OPC_STLW:
582 case OPC_L2G:
583 case OPC_G2L:
584 return true;
585 default:
586 break;
587 }
588 }
589 return false;
590 }
591
592 static inline bool is_load(struct ir3_instruction *instr)
593 {
594 if (is_mem(instr)) {
595 switch (instr->opc) {
596 case OPC_LDG:
597 case OPC_LDL:
598 case OPC_LDP:
599 case OPC_L2G:
600 case OPC_LDLW:
601 case OPC_LDC_4:
602 case OPC_LDLV:
603 /* probably some others too.. */
604 return true;
605 default:
606 break;
607 }
608 }
609 return false;
610 }
611
612 static inline bool is_input(struct ir3_instruction *instr)
613 {
614 /* in some cases, ldlv is used to fetch a varying without
615 * interpolation.. fortunately inloc is the first src
616 * register in either case
617 */
618 if (is_mem(instr) && (instr->opc == OPC_LDLV))
619 return true;
620 return (instr->category == 2) && (instr->opc == OPC_BARY_F);
621 }
622
623 static inline bool is_meta(struct ir3_instruction *instr)
624 {
625 /* TODO how should we count PHI (and maybe fan-in/out) which
626 * might actually contribute some instructions to the final
627 * result?
628 */
629 return (instr->category == -1);
630 }
631
632 static inline bool writes_addr(struct ir3_instruction *instr)
633 {
634 if (instr->regs_count > 0) {
635 struct ir3_register *dst = instr->regs[0];
636 return reg_num(dst) == REG_A0;
637 }
638 return false;
639 }
640
641 static inline bool writes_pred(struct ir3_instruction *instr)
642 {
643 if (instr->regs_count > 0) {
644 struct ir3_register *dst = instr->regs[0];
645 return reg_num(dst) == REG_P0;
646 }
647 return false;
648 }
649
650 /* returns defining instruction for reg */
651 /* TODO better name */
652 static inline struct ir3_instruction *ssa(struct ir3_register *reg)
653 {
654 if (reg->flags & (IR3_REG_SSA | IR3_REG_ARRAY)) {
655 debug_assert(!(reg->instr && (reg->instr->flags & IR3_INSTR_UNUSED)));
656 return reg->instr;
657 }
658 return NULL;
659 }
660
661 static inline bool conflicts(struct ir3_instruction *a,
662 struct ir3_instruction *b)
663 {
664 return (a && b) && (a != b);
665 }
666
667 static inline bool reg_gpr(struct ir3_register *r)
668 {
669 if (r->flags & (IR3_REG_CONST | IR3_REG_IMMED))
670 return false;
671 if ((reg_num(r) == REG_A0) || (reg_num(r) == REG_P0))
672 return false;
673 return true;
674 }
675
676 static inline type_t half_type(type_t type)
677 {
678 switch (type) {
679 case TYPE_F32: return TYPE_F16;
680 case TYPE_U32: return TYPE_U16;
681 case TYPE_S32: return TYPE_S16;
682 case TYPE_F16:
683 case TYPE_U16:
684 case TYPE_S16:
685 return type;
686 default:
687 assert(0);
688 return ~0;
689 }
690 }
691
692 /* some cat2 instructions (ie. those which are not float) can embed an
693 * immediate:
694 */
695 static inline bool ir3_cat2_int(opc_t opc)
696 {
697 switch (opc) {
698 case OPC_ADD_U:
699 case OPC_ADD_S:
700 case OPC_SUB_U:
701 case OPC_SUB_S:
702 case OPC_CMPS_U:
703 case OPC_CMPS_S:
704 case OPC_MIN_U:
705 case OPC_MIN_S:
706 case OPC_MAX_U:
707 case OPC_MAX_S:
708 case OPC_CMPV_U:
709 case OPC_CMPV_S:
710 case OPC_MUL_U:
711 case OPC_MUL_S:
712 case OPC_MULL_U:
713 case OPC_CLZ_S:
714 case OPC_ABSNEG_S:
715 case OPC_AND_B:
716 case OPC_OR_B:
717 case OPC_NOT_B:
718 case OPC_XOR_B:
719 case OPC_BFREV_B:
720 case OPC_CLZ_B:
721 case OPC_SHL_B:
722 case OPC_SHR_B:
723 case OPC_ASHR_B:
724 case OPC_MGEN_B:
725 case OPC_GETBIT_B:
726 case OPC_CBITS_B:
727 case OPC_BARY_F:
728 return true;
729
730 default:
731 return false;
732 }
733 }
734
735
736 /* map cat2 instructions to valid abs/neg flags: */
737 static inline unsigned ir3_cat2_absneg(opc_t opc)
738 {
739 switch (opc) {
740 case OPC_ADD_F:
741 case OPC_MIN_F:
742 case OPC_MAX_F:
743 case OPC_MUL_F:
744 case OPC_SIGN_F:
745 case OPC_CMPS_F:
746 case OPC_ABSNEG_F:
747 case OPC_CMPV_F:
748 case OPC_FLOOR_F:
749 case OPC_CEIL_F:
750 case OPC_RNDNE_F:
751 case OPC_RNDAZ_F:
752 case OPC_TRUNC_F:
753 case OPC_BARY_F:
754 return IR3_REG_FABS | IR3_REG_FNEG;
755
756 case OPC_ADD_U:
757 case OPC_ADD_S:
758 case OPC_SUB_U:
759 case OPC_SUB_S:
760 case OPC_CMPS_U:
761 case OPC_CMPS_S:
762 case OPC_MIN_U:
763 case OPC_MIN_S:
764 case OPC_MAX_U:
765 case OPC_MAX_S:
766 case OPC_CMPV_U:
767 case OPC_CMPV_S:
768 case OPC_MUL_U:
769 case OPC_MUL_S:
770 case OPC_MULL_U:
771 case OPC_CLZ_S:
772 return 0;
773
774 case OPC_ABSNEG_S:
775 return IR3_REG_SABS | IR3_REG_SNEG;
776
777 case OPC_AND_B:
778 case OPC_OR_B:
779 case OPC_NOT_B:
780 case OPC_XOR_B:
781 case OPC_BFREV_B:
782 case OPC_CLZ_B:
783 case OPC_SHL_B:
784 case OPC_SHR_B:
785 case OPC_ASHR_B:
786 case OPC_MGEN_B:
787 case OPC_GETBIT_B:
788 case OPC_CBITS_B:
789 return IR3_REG_BNOT;
790
791 default:
792 return 0;
793 }
794 }
795
796 /* map cat3 instructions to valid abs/neg flags: */
797 static inline unsigned ir3_cat3_absneg(opc_t opc)
798 {
799 switch (opc) {
800 case OPC_MAD_F16:
801 case OPC_MAD_F32:
802 case OPC_SEL_F16:
803 case OPC_SEL_F32:
804 return IR3_REG_FNEG;
805
806 case OPC_MAD_U16:
807 case OPC_MADSH_U16:
808 case OPC_MAD_S16:
809 case OPC_MADSH_M16:
810 case OPC_MAD_U24:
811 case OPC_MAD_S24:
812 case OPC_SEL_S16:
813 case OPC_SEL_S32:
814 case OPC_SAD_S16:
815 case OPC_SAD_S32:
816 /* neg *may* work on 3rd src.. */
817
818 case OPC_SEL_B16:
819 case OPC_SEL_B32:
820
821 default:
822 return 0;
823 }
824 }
825
826 #define array_insert(arr, val) do { \
827 if (arr ## _count == arr ## _sz) { \
828 arr ## _sz = MAX2(2 * arr ## _sz, 16); \
829 arr = realloc(arr, arr ## _sz * sizeof(arr[0])); \
830 } \
831 arr[arr ##_count++] = val; \
832 } while (0)
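/* Illustrative usage (not from the original header), assuming 'ir' and
 * 'instr' are in scope: appending to one of the tracked lists above, e.g.
 *
 *    array_insert(ir->keeps, instr);
 *
 * grows ir->keeps as needed (doubling ir->keeps_sz, starting at 16) and
 * stores the instruction at ir->keeps_count++.
 */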
833
834 /* iterator for an instruction's sources (reg), also returns src #: */
835 #define foreach_src_n(__srcreg, __n, __instr) \
836 if ((__instr)->regs_count) \
837 for (unsigned __cnt = (__instr)->regs_count - 1, __n = 0; __n < __cnt; __n++) \
838 if ((__srcreg = (__instr)->regs[__n + 1]))
839
840 /* iterator for an instruction's sources (reg): */
841 #define foreach_src(__srcreg, __instr) \
842 foreach_src_n(__srcreg, __i, __instr)
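/* Illustrative usage (not from the original header): srcs are regs[1..n]
 * (regs[0] being the dst), assuming 'instr' is in scope:
 *
 *    struct ir3_register *reg;
 *    foreach_src(reg, instr) {
 *       // reg is one src register of instr
 *    }
 */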
843
844 static inline unsigned __ssa_src_cnt(struct ir3_instruction *instr)
845 {
846 if (instr->address)
847 return instr->regs_count + 1;
848 return instr->regs_count;
849 }
850
851 static inline struct ir3_instruction * __ssa_src_n(struct ir3_instruction *instr, unsigned n)
852 {
853 if (n == (instr->regs_count + 0))
854 return instr->address;
855 return ssa(instr->regs[n]);
856 }
857
858 #define __src_cnt(__instr) ((__instr)->address ? (__instr)->regs_count : (__instr)->regs_count - 1)
859
860 /* iterator for an instruction's SSA sources (instr), also returns src #: */
861 #define foreach_ssa_src_n(__srcinst, __n, __instr) \
862 if ((__instr)->regs_count) \
863 for (unsigned __cnt = __ssa_src_cnt(__instr), __n = 0; __n < __cnt; __n++) \
864 if ((__srcinst = __ssa_src_n(__instr, __n)))
865
866 /* iterator for an instruction's SSA sources (instr): */
867 #define foreach_ssa_src(__srcinst, __instr) \
868 foreach_ssa_src_n(__srcinst, __i, __instr)
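/* Illustrative usage (not from the original header): iterate the defining
 * instructions of an instruction's SSA srcs, including the address register
 * writer if any, assuming 'instr' is in scope:
 *
 *    struct ir3_instruction *src;
 *    foreach_ssa_src(src, instr) {
 *       // src is an instruction that instr depends on
 *    }
 */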
869
870
871 /* dump: */
872 void ir3_print(struct ir3 *ir);
873 void ir3_print_instr(struct ir3_instruction *instr);
874
875 /* depth calculation: */
876 int ir3_delayslots(struct ir3_instruction *assigner,
877 struct ir3_instruction *consumer, unsigned n);
878 void ir3_insert_by_depth(struct ir3_instruction *instr, struct list_head *list);
879 void ir3_depth(struct ir3 *ir);
880
881 /* copy-propagate: */
882 void ir3_cp(struct ir3 *ir);
883
884 /* group neighbors and insert mov's to resolve conflicts: */
885 void ir3_group(struct ir3 *ir);
886
887 /* scheduling: */
888 int ir3_sched(struct ir3 *ir);
889
890 /* register assignment: */
891 struct ir3_ra_reg_set * ir3_ra_alloc_reg_set(void *memctx);
892 int ir3_ra(struct ir3 *ir3, enum shader_t type,
893 bool frag_coord, bool frag_face);
894
895 /* legalize: */
896 void ir3_legalize(struct ir3 *ir, bool *has_samp, int *max_bary);
897
898 /* ************************************************************************* */
899 /* instruction helpers */
900
901 static inline struct ir3_instruction *
902 ir3_MOV(struct ir3_block *block, struct ir3_instruction *src, type_t type)
903 {
904 struct ir3_instruction *instr =
905 ir3_instr_create(block, 1, OPC_MOV);
906 ir3_reg_create(instr, 0, 0); /* dst */
907 if (src->regs[0]->flags & IR3_REG_ARRAY) {
908 struct ir3_register *src_reg =
909 ir3_reg_create(instr, 0, IR3_REG_ARRAY);
910 src_reg->array = src->regs[0]->array;
911 src_reg->instr = src;
912 } else {
913 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
914 }
915 debug_assert(!(src->regs[0]->flags & IR3_REG_RELATIV));
916 instr->cat1.src_type = type;
917 instr->cat1.dst_type = type;
918 return instr;
919 }
920
921 static inline struct ir3_instruction *
922 ir3_COV(struct ir3_block *block, struct ir3_instruction *src,
923 type_t src_type, type_t dst_type)
924 {
925 struct ir3_instruction *instr =
926 ir3_instr_create(block, 1, OPC_MOV);
927 ir3_reg_create(instr, 0, 0); /* dst */
928 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
929 instr->cat1.src_type = src_type;
930 instr->cat1.dst_type = dst_type;
931 debug_assert(!(src->regs[0]->flags & IR3_REG_ARRAY));
932 return instr;
933 }
934
935 static inline struct ir3_instruction *
936 ir3_NOP(struct ir3_block *block)
937 {
938 return ir3_instr_create(block, 0, OPC_NOP);
939 }
940
941 #define INSTR0(CAT, name) \
942 static inline struct ir3_instruction * \
943 ir3_##name(struct ir3_block *block) \
944 { \
945 struct ir3_instruction *instr = \
946 ir3_instr_create(block, CAT, OPC_##name); \
947 return instr; \
948 }
949
950 #define INSTR1(CAT, name) \
951 static inline struct ir3_instruction * \
952 ir3_##name(struct ir3_block *block, \
953 struct ir3_instruction *a, unsigned aflags) \
954 { \
955 struct ir3_instruction *instr = \
956 ir3_instr_create(block, CAT, OPC_##name); \
957 ir3_reg_create(instr, 0, 0); /* dst */ \
958 ir3_reg_create(instr, 0, IR3_REG_SSA | aflags)->instr = a; \
959 return instr; \
960 }
961
962 #define INSTR2(CAT, name) \
963 static inline struct ir3_instruction * \
964 ir3_##name(struct ir3_block *block, \
965 struct ir3_instruction *a, unsigned aflags, \
966 struct ir3_instruction *b, unsigned bflags) \
967 { \
968 struct ir3_instruction *instr = \
969 ir3_instr_create(block, CAT, OPC_##name); \
970 ir3_reg_create(instr, 0, 0); /* dst */ \
971 ir3_reg_create(instr, 0, IR3_REG_SSA | aflags)->instr = a; \
972 ir3_reg_create(instr, 0, IR3_REG_SSA | bflags)->instr = b; \
973 return instr; \
974 }
975
976 #define INSTR3(CAT, name) \
977 static inline struct ir3_instruction * \
978 ir3_##name(struct ir3_block *block, \
979 struct ir3_instruction *a, unsigned aflags, \
980 struct ir3_instruction *b, unsigned bflags, \
981 struct ir3_instruction *c, unsigned cflags) \
982 { \
983 struct ir3_instruction *instr = \
984 ir3_instr_create(block, CAT, OPC_##name); \
985 ir3_reg_create(instr, 0, 0); /* dst */ \
986 ir3_reg_create(instr, 0, IR3_REG_SSA | aflags)->instr = a; \
987 ir3_reg_create(instr, 0, IR3_REG_SSA | bflags)->instr = b; \
988 ir3_reg_create(instr, 0, IR3_REG_SSA | cflags)->instr = c; \
989 return instr; \
990 }
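/* Illustrative note (not from the original header): each INSTRn(CAT, name)
 * invocation below defines a small static inline builder, e.g.
 * INSTR2(2, ADD_F) defines
 *
 *    struct ir3_instruction *ir3_ADD_F(struct ir3_block *block,
 *          struct ir3_instruction *a, unsigned aflags,
 *          struct ir3_instruction *b, unsigned bflags);
 *
 * which creates a cat2 instruction with a dst reg plus SSA src regs a and b.
 */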
991
992 /* cat0 instructions: */
993 INSTR0(0, BR);
994 INSTR0(0, JUMP);
995 INSTR1(0, KILL);
996 INSTR0(0, END);
997
998 /* cat2 instructions, most 2 src but some 1 src: */
999 INSTR2(2, ADD_F)
1000 INSTR2(2, MIN_F)
1001 INSTR2(2, MAX_F)
1002 INSTR2(2, MUL_F)
1003 INSTR1(2, SIGN_F)
1004 INSTR2(2, CMPS_F)
1005 INSTR1(2, ABSNEG_F)
1006 INSTR2(2, CMPV_F)
1007 INSTR1(2, FLOOR_F)
1008 INSTR1(2, CEIL_F)
1009 INSTR1(2, RNDNE_F)
1010 INSTR1(2, RNDAZ_F)
1011 INSTR1(2, TRUNC_F)
1012 INSTR2(2, ADD_U)
1013 INSTR2(2, ADD_S)
1014 INSTR2(2, SUB_U)
1015 INSTR2(2, SUB_S)
1016 INSTR2(2, CMPS_U)
1017 INSTR2(2, CMPS_S)
1018 INSTR2(2, MIN_U)
1019 INSTR2(2, MIN_S)
1020 INSTR2(2, MAX_U)
1021 INSTR2(2, MAX_S)
1022 INSTR1(2, ABSNEG_S)
1023 INSTR2(2, AND_B)
1024 INSTR2(2, OR_B)
1025 INSTR1(2, NOT_B)
1026 INSTR2(2, XOR_B)
1027 INSTR2(2, CMPV_U)
1028 INSTR2(2, CMPV_S)
1029 INSTR2(2, MUL_U)
1030 INSTR2(2, MUL_S)
1031 INSTR2(2, MULL_U)
1032 INSTR1(2, BFREV_B)
1033 INSTR1(2, CLZ_S)
1034 INSTR1(2, CLZ_B)
1035 INSTR2(2, SHL_B)
1036 INSTR2(2, SHR_B)
1037 INSTR2(2, ASHR_B)
1038 INSTR2(2, BARY_F)
1039 INSTR2(2, MGEN_B)
1040 INSTR2(2, GETBIT_B)
1041 INSTR1(2, SETRM)
1042 INSTR1(2, CBITS_B)
1043 INSTR2(2, SHB)
1044 INSTR2(2, MSAD)
1045
1046 /* cat3 instructions: */
1047 INSTR3(3, MAD_U16)
1048 INSTR3(3, MADSH_U16)
1049 INSTR3(3, MAD_S16)
1050 INSTR3(3, MADSH_M16)
1051 INSTR3(3, MAD_U24)
1052 INSTR3(3, MAD_S24)
1053 INSTR3(3, MAD_F16)
1054 INSTR3(3, MAD_F32)
1055 INSTR3(3, SEL_B16)
1056 INSTR3(3, SEL_B32)
1057 INSTR3(3, SEL_S16)
1058 INSTR3(3, SEL_S32)
1059 INSTR3(3, SEL_F16)
1060 INSTR3(3, SEL_F32)
1061 INSTR3(3, SAD_S16)
1062 INSTR3(3, SAD_S32)
1063
1064 /* cat4 instructions: */
1065 INSTR1(4, RCP)
1066 INSTR1(4, RSQ)
1067 INSTR1(4, LOG2)
1068 INSTR1(4, EXP2)
1069 INSTR1(4, SIN)
1070 INSTR1(4, COS)
1071 INSTR1(4, SQRT)
1072
1073 /* cat5 instructions: */
1074 INSTR1(5, DSX)
1075 INSTR1(5, DSY)
1076
1077 static inline struct ir3_instruction *
1078 ir3_SAM(struct ir3_block *block, opc_t opc, type_t type,
1079 unsigned wrmask, unsigned flags, unsigned samp, unsigned tex,
1080 struct ir3_instruction *src0, struct ir3_instruction *src1)
1081 {
1082 struct ir3_instruction *sam;
1083 struct ir3_register *reg;
1084
1085 sam = ir3_instr_create(block, 5, opc);
1086 sam->flags |= flags;
1087 ir3_reg_create(sam, 0, 0)->wrmask = wrmask;
1088 if (src0) {
1089 reg = ir3_reg_create(sam, 0, IR3_REG_SSA);
1090 reg->wrmask = (1 << (src0->regs_count - 1)) - 1;
1091 reg->instr = src0;
1092 }
1093 if (src1) {
1094 reg = ir3_reg_create(sam, 0, IR3_REG_SSA);
1095 reg->instr = src1;
1096 reg->wrmask = (1 << (src1->regs_count - 1)) - 1;
1097 }
1098 sam->cat5.samp = samp;
1099 sam->cat5.tex = tex;
1100 sam->cat5.type = type;
1101
1102 return sam;
1103 }
1104
1105 /* cat6 instructions: */
1106 INSTR2(6, LDLV)
1107 INSTR2(6, LDG)
1108 INSTR3(6, STG)
1109
1110 /* ************************************************************************* */
1111 /* split this out or find some helper to use.. like main/bitset.h.. */
1112
1113 #include <string.h>
1114
1115 #define MAX_REG 256
1116
1117 typedef uint8_t regmask_t[2 * MAX_REG / 8];
1118
1119 static inline unsigned regmask_idx(struct ir3_register *reg)
1120 {
1121 unsigned num = (reg->flags & IR3_REG_RELATIV) ? reg->array.offset : reg->num;
1122 debug_assert(num < MAX_REG);
1123 if (reg->flags & IR3_REG_HALF)
1124 num += MAX_REG;
1125 return num;
1126 }
1127
1128 static inline void regmask_init(regmask_t *regmask)
1129 {
1130 memset(regmask, 0, sizeof(*regmask));
1131 }
1132
1133 static inline void regmask_set(regmask_t *regmask, struct ir3_register *reg)
1134 {
1135 unsigned idx = regmask_idx(reg);
1136 if (reg->flags & IR3_REG_RELATIV) {
1137 unsigned i;
1138 for (i = 0; i < reg->size; i++, idx++)
1139 (*regmask)[idx / 8] |= 1 << (idx % 8);
1140 } else {
1141 unsigned mask;
1142 for (mask = reg->wrmask; mask; mask >>= 1, idx++)
1143 if (mask & 1)
1144 (*regmask)[idx / 8] |= 1 << (idx % 8);
1145 }
1146 }
1147
1148 static inline void regmask_or(regmask_t *dst, regmask_t *a, regmask_t *b)
1149 {
1150 unsigned i;
1151 for (i = 0; i < ARRAY_SIZE(*dst); i++)
1152 (*dst)[i] = (*a)[i] | (*b)[i];
1153 }
1154
1155 /* set bits in a if not set in b, conceptually:
1156 * a |= (reg & ~b)
1157 */
1158 static inline void regmask_set_if_not(regmask_t *a,
1159 struct ir3_register *reg, regmask_t *b)
1160 {
1161 unsigned idx = regmask_idx(reg);
1162 if (reg->flags & IR3_REG_RELATIV) {
1163 unsigned i;
1164 for (i = 0; i < reg->size; i++, idx++)
1165 if (!((*b)[idx / 8] & (1 << (idx % 8))))
1166 (*a)[idx / 8] |= 1 << (idx % 8);
1167 } else {
1168 unsigned mask;
1169 for (mask = reg->wrmask; mask; mask >>= 1, idx++)
1170 if (mask & 1)
1171 if (!((*b)[idx / 8] & (1 << (idx % 8))))
1172 (*a)[idx / 8] |= 1 << (idx % 8);
1173 }
1174 }
1175
1176 static inline bool regmask_get(regmask_t *regmask,
1177 struct ir3_register *reg)
1178 {
1179 unsigned idx = regmask_idx(reg);
1180 if (reg->flags & IR3_REG_RELATIV) {
1181 unsigned i;
1182 for (i = 0; i < reg->size; i++, idx++)
1183 if ((*regmask)[idx / 8] & (1 << (idx % 8)))
1184 return true;
1185 } else {
1186 unsigned mask;
1187 for (mask = reg->wrmask; mask; mask >>= 1, idx++)
1188 if (mask & 1)
1189 if ((*regmask)[idx / 8] & (1 << (idx % 8)))
1190 return true;
1191 }
1192 return false;
1193 }
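/* Illustrative sketch (not from the original header) of how a pass might use
 * these masks, e.g. to decide when a sync flag is needed; the variable names
 * here are made up:
 *
 *    regmask_t needs_ss;
 *    regmask_init(&needs_ss);
 *    ...
 *    if (regmask_get(&needs_ss, src_reg))       // reads a not-yet-synced reg
 *       instr->flags |= IR3_INSTR_SS;
 *    ...
 *    regmask_set(&needs_ss, sfu_dst_reg);       // result needs (ss) before use
 */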
1194
1195 /* ************************************************************************* */
1196
1197 #endif /* IR3_H_ */