freedreno/ir3: remove ir3_instruction::category
[mesa.git] / src / gallium / drivers / freedreno / ir3 / ir3.h
1 /*
2 * Copyright (c) 2013 Rob Clark <robdclark@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #ifndef IR3_H_
25 #define IR3_H_
26
27 #include <stdint.h>
28 #include <stdbool.h>
29
30 #include "util/u_debug.h"
31 #include "util/list.h"
32
33 #include "instr-a3xx.h"
34 #include "disasm.h" /* TODO move 'enum shader_t' somewhere else.. */
35
36 /* low level intermediate representation of an adreno shader program */
37
38 struct ir3_compiler;
39 struct ir3;
40 struct ir3_instruction;
41 struct ir3_block;
42
43 struct ir3_info {
44 uint32_t gpu_id;
45 uint16_t sizedwords;
46 uint16_t instrs_count; /* expanded to account for rpt's */
47 /* NOTE: max_reg, etc, do not include registers not touched
48 * by the shader (ie. vertex data fetched via VFD_DECODE but not
49 * touched by the shader)
50 */
51 int8_t max_reg; /* highest GPR # used by shader */
52 int8_t max_half_reg;
53 int16_t max_const;
54 };
55
56 struct ir3_register {
57 enum {
58 IR3_REG_CONST = 0x001,
59 IR3_REG_IMMED = 0x002,
60 IR3_REG_HALF = 0x004,
61 IR3_REG_RELATIV= 0x008,
62 IR3_REG_R = 0x010,
63 /* Most instructions, it seems, can do float abs/neg but not
64 * integer. The CP pass needs to know what is intended (int or
65 * float) in order to do the right thing. For this reason the
66 * abs/neg flags are split out into float and int variants. In
67 * addition, for .b (bitwise) operations the negate is actually a
68 * bitwise not, so that is split out into a separate flag to make
69 * it clearer.
70 */
71 IR3_REG_FNEG = 0x020,
72 IR3_REG_FABS = 0x040,
73 IR3_REG_SNEG = 0x080,
74 IR3_REG_SABS = 0x100,
75 IR3_REG_BNOT = 0x200,
76 IR3_REG_EVEN = 0x400,
77 IR3_REG_POS_INF= 0x800,
78 /* (ei) flag, end-input? Set on last bary, presumably to signal
79 * that the shader needs no more input:
80 */
81 IR3_REG_EI = 0x1000,
82 /* meta-flags, for intermediate stages of IR, ie.
83 * before register assignment is done:
84 */
85 IR3_REG_SSA = 0x2000, /* 'instr' is ptr to assigning instr */
86 IR3_REG_ARRAY = 0x4000,
87 IR3_REG_PHI_SRC= 0x8000, /* phi src, regs[0]->instr points to phi */
88
89 } flags;
90 union {
91 /* normal registers:
92 * the component is in the low two bits of the reg #, so
93 * rN.x becomes: (N << 2) | x
94 */
95 int num;
96 /* immediate: */
97 int32_t iim_val;
98 uint32_t uim_val;
99 float fim_val;
100 /* relative: */
101 struct {
102 uint16_t id;
103 int16_t offset;
104 } array;
105 };
106
107 /* For IR3_REG_SSA, src registers contain ptr back to assigning
108 * instruction.
109 *
110 * For IR3_REG_ARRAY, the pointer is back to the last dependent
111 * array access (although the net effect is the same, it points
112 * back to a previous instruction that we depend on).
113 */
114 struct ir3_instruction *instr;
115
116 union {
117 /* used for cat5 instructions, but also for internal/IR level
118 * tracking of what registers are read/written by an instruction.
119 * wrmask may be a bad name since it is used to represent both
120 * src and dst that touch multiple adjacent registers.
121 */
122 unsigned wrmask;
123 /* for relative addressing, a 32 bit wrmask would be too small for
124 * larger arrays, but otoh we don't need to deal with disjoint sets,
125 * so instead use a simple size field (number of scalar components).
126 */
127 unsigned size;
128 };
129 };
130
131 struct ir3_instruction {
132 struct ir3_block *block;
133 opc_t opc;
134 enum {
135 /* (sy) flag is set on the first instruction, and after sample
136 * instructions (probably just on RAW hazard).
137 */
138 IR3_INSTR_SY = 0x001,
139 /* (ss) flag is set on the first instruction, and on the first
140 * instruction to depend on the result of "long" instructions (RAW hazard):
141 *
142 * rcp, rsq, log2, exp2, sin, cos, sqrt
143 *
144 * It seems to synchronize until all in-flight instructions are
145 * completed, for example:
146 *
147 * rsq hr1.w, hr1.w
148 * add.f hr2.z, (neg)hr2.z, hc0.y
149 * mul.f hr2.w, (neg)hr2.y, (neg)hr2.y
150 * rsq hr2.x, hr2.x
151 * (rpt1)nop
152 * mad.f16 hr2.w, hr2.z, hr2.z, hr2.w
153 * nop
154 * mad.f16 hr2.w, (neg)hr0.w, (neg)hr0.w, hr2.w
155 * (ss)(rpt2)mul.f hr1.x, (r)hr1.x, hr1.w
156 * (rpt2)mul.f hr0.x, (neg)(r)hr0.x, hr2.x
157 *
158 * The last mul.f does not have (ss) set, presumably because the
159 * (ss) on the previous instruction does the job.
160 *
161 * The blob driver also seems to set it on WAR hazards, although it
162 * is not really clear if this is needed or just the blob compiler
163 * being sloppy. So far I haven't found a case where removing the
164 * (ss) causes problems for a WAR hazard, but I could just be
165 * getting lucky:
166 *
167 * rcp r1.y, r3.y
168 * (ss)(rpt2)mad.f32 r3.y, (r)c9.x, r1.x, (r)r3.z
169 *
170 */
171 IR3_INSTR_SS = 0x002,
172 /* (jp) flag is set on jump targets:
173 */
174 IR3_INSTR_JP = 0x004,
175 IR3_INSTR_UL = 0x008,
176 IR3_INSTR_3D = 0x010,
177 IR3_INSTR_A = 0x020,
178 IR3_INSTR_O = 0x040,
179 IR3_INSTR_P = 0x080,
180 IR3_INSTR_S = 0x100,
181 IR3_INSTR_S2EN = 0x200,
182 IR3_INSTR_G = 0x400,
183 /* meta-flags, for intermediate stages of IR, ie.
184 * before register assignment is done:
185 */
186 IR3_INSTR_MARK = 0x1000,
187 IR3_INSTR_UNUSED= 0x2000,
188 } flags;
189 int repeat;
190 #ifdef DEBUG
191 unsigned regs_max;
192 #endif
193 unsigned regs_count;
194 struct ir3_register **regs;
195 union {
196 struct {
197 char inv;
198 char comp;
199 int immed;
200 struct ir3_block *target;
201 } cat0;
202 struct {
203 type_t src_type, dst_type;
204 } cat1;
205 struct {
206 enum {
207 IR3_COND_LT = 0,
208 IR3_COND_LE = 1,
209 IR3_COND_GT = 2,
210 IR3_COND_GE = 3,
211 IR3_COND_EQ = 4,
212 IR3_COND_NE = 5,
213 } condition;
214 } cat2;
215 struct {
216 unsigned samp, tex;
217 type_t type;
218 } cat5;
219 struct {
220 type_t type;
221 int src_offset;
222 int dst_offset;
223 int iim_val;
224 } cat6;
225 /* for meta-instructions, just used to hold extra data
226 * before instruction scheduling, etc
227 */
228 struct {
229 int off; /* component/offset */
230 } fo;
231 struct {
232 /* used to temporarily hold reference to nir_phi_instr
233 * until we resolve the phi srcs
234 */
235 void *nphi;
236 } phi;
237 struct {
238 struct ir3_block *block;
239 } inout;
240 };
241
242 /* transient values used during various algorithms: */
243 union {
244 /* The instruction depth is the max dependency distance to output.
245 *
246 * You can also think of it as the "cost", if we did any sort of
247 * optimization for register footprint. Ie. a value that is just
248 * the result of moving a const to a reg would have a low cost, so
249 * it could make sense to duplicate the instruction at various
250 * points where the result is needed to reduce register footprint.
251 */
252 unsigned depth;
253 /* When we get to the RA stage, we no longer need depth, but
254 * we do need the instruction's position/name:
255 */
256 struct {
257 uint16_t ip;
258 uint16_t name;
259 };
260 };
261
262 /* used for per-pass extra instruction data.
263 */
264 void *data;
265
266 /* Used during CP and RA stages. For fanin and shader inputs/
267 * outputs where we need a sequence of consecutive registers,
268 * keep track of each src instruction's left (ie 'n-1') and right
269 * (ie 'n+1') neighbor. The front-end must insert enough mov's
270 * to ensure that each instruction has at most one left and at
271 * most one right neighbor. During the copy-propagation pass,
272 * we only remove mov's when we can preserve this constraint.
273 * And during the RA stage, we use the neighbor information to
274 * allocate a block of registers in one shot.
275 *
276 * TODO: maybe just add something like:
277 * struct ir3_instruction_ref {
278 * struct ir3_instruction *instr;
279 * unsigned cnt;
280 * }
281 *
282 * Or can we get away without the refcnt stuff? It seems like
283 * it should be overkill.. the problem is if, potentially after
284 * already eliminating some mov's, you have a single mov that
285 * needs to be grouped with its neighbors in two different
286 * places (ex. shader output and a fanin).
287 */
288 struct {
289 struct ir3_instruction *left, *right;
290 uint16_t left_cnt, right_cnt;
291 } cp;
292
293 /* an instruction can reference at most one address register amongst
294 * its src/dst registers. Beyond that, you need to insert mov's.
295 *
296 * NOTE: do not write this directly, use ir3_instr_set_address()
297 */
298 struct ir3_instruction *address;
299
300 /* Entry in ir3_block's instruction list: */
301 struct list_head node;
302
303 #ifdef DEBUG
304 uint32_t serialno;
305 #endif
306 };
307
308 static inline struct ir3_instruction *
309 ir3_neighbor_first(struct ir3_instruction *instr)
310 {
311 while (instr->cp.left)
312 instr = instr->cp.left;
313 return instr;
314 }
315
316 static inline int ir3_neighbor_count(struct ir3_instruction *instr)
317 {
318 int num = 1;
319
320 debug_assert(!instr->cp.left);
321
322 while (instr->cp.right) {
323 num++;
324 instr = instr->cp.right;
325 }
326
327 return num;
328 }
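/* A rough usage sketch (illustrative; 'member' is a hypothetical pointer
 * to one instruction of a group): a pass that needs the whole group of
 * register-adjacent instructions can start from any member, rewind to
 * the left-most one, and then walk right:
 *
 *    struct ir3_instruction *first = ir3_neighbor_first(member);
 *    int n = ir3_neighbor_count(first);
 *    for (struct ir3_instruction *cur = first; cur; cur = cur->cp.right) {
 *        // visit each of the 'n' instructions that must end up in
 *        // consecutive registers
 *    }
 */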
329
330 struct ir3_heap_chunk;
331
332 struct ir3 {
333 struct ir3_compiler *compiler;
334
335 unsigned ninputs, noutputs;
336 struct ir3_instruction **inputs;
337 struct ir3_instruction **outputs;
338
339 /* Track bary.f (and ldlv) instructions.. this is needed in
340 * scheduling to ensure that all varying fetches happen before
341 * any potential kill instructions. The hw gets grumpy if all
342 * threads in a group are killed before the last bary.f gets
343 * a chance to signal end of input (ei).
344 */
345 unsigned baryfs_count, baryfs_sz;
346 struct ir3_instruction **baryfs;
347
348 /* Track all indirect instructions (read and write). To avoid a
349 * deadlock scenario where an address register gets scheduled,
350 * but other dependent src instructions cannot be scheduled due
351 * to a dependency on a *different* address register value, the
352 * scheduler needs to ensure that all dependencies of an
353 * instruction, other than the address register itself, are
354 * scheduled before the one that writes the address register.
355 * Having a convenient list of instructions that reference some
356 * address register simplifies this.
357 */
358 unsigned indirects_count, indirects_sz;
359 struct ir3_instruction **indirects;
360 /* and same for instructions that consume predicate register: */
361 unsigned predicates_count, predicates_sz;
362 struct ir3_instruction **predicates;
363
364 /* Track instructions which do not write a register but other-
365 * wise must not be discarded (such as kill, stg, etc)
366 */
367 unsigned keeps_count, keeps_sz;
368 struct ir3_instruction **keeps;
369
370 /* List of blocks: */
371 struct list_head block_list;
372
373 /* List of ir3_array's: */
374 struct list_head array_list;
375
376 unsigned heap_idx;
377 struct ir3_heap_chunk *chunk;
378 };
379
380 typedef struct nir_variable nir_variable;
381
382 struct ir3_array {
383 struct list_head node;
384 unsigned length;
385 unsigned id;
386
387 nir_variable *var;
388
389 /* We track the last write and last access (read or write) to
390 * setup dependencies on instructions that read or write the
391 * array. Reads can be re-ordered wrt. other reads, but should
392 * not be re-ordered wrt. writes. Writes cannot be reordered
393 * wrt. any other access to the array.
394 *
395 * So array reads depend on last write, and array writes depend
396 * on the last access.
397 */
398 struct ir3_instruction *last_write, *last_access;
399
400 /* extra stuff used in RA pass: */
401 unsigned base; /* base vreg name */
402 unsigned reg; /* base physical reg */
403 uint16_t start_ip, end_ip;
404 };
405
406 struct ir3_array * ir3_lookup_array(struct ir3 *ir, unsigned id);
407
408 typedef struct nir_block nir_block;
409
410 struct ir3_block {
411 struct list_head node;
412 struct ir3 *shader;
413
414 nir_block *nblock;
415
416 struct list_head instr_list; /* list of ir3_instruction */
417
418 /* each block has either one or two successors.. in case of
419 * two successors, 'condition' decides which one to follow.
420 * A block preceding an if/else has two successors.
421 */
422 struct ir3_instruction *condition;
423 struct ir3_block *successors[2];
424
425 uint16_t start_ip, end_ip;
426
427 /* used for per-pass extra block data. Mainly used right
428 * now in RA step to track livein/liveout.
429 */
430 void *data;
431
432 #ifdef DEBUG
433 uint32_t serialno;
434 #endif
435 };
436
437 struct ir3 * ir3_create(struct ir3_compiler *compiler,
438 unsigned nin, unsigned nout);
439 void ir3_destroy(struct ir3 *shader);
440 void * ir3_assemble(struct ir3 *shader,
441 struct ir3_info *info, uint32_t gpu_id);
442 void * ir3_alloc(struct ir3 *shader, int sz);
443
444 struct ir3_block * ir3_block_create(struct ir3 *shader);
445
446 struct ir3_instruction * ir3_instr_create(struct ir3_block *block,
447 int category, opc_t opc);
448 struct ir3_instruction * ir3_instr_create2(struct ir3_block *block,
449 int category, opc_t opc, int nreg);
450 struct ir3_instruction * ir3_instr_clone(struct ir3_instruction *instr);
451 const char *ir3_instr_name(struct ir3_instruction *instr);
452
453 struct ir3_register * ir3_reg_create(struct ir3_instruction *instr,
454 int num, int flags);
455 struct ir3_register * ir3_reg_clone(struct ir3 *shader,
456 struct ir3_register *reg);
457
458 void ir3_instr_set_address(struct ir3_instruction *instr,
459 struct ir3_instruction *addr);
460
461 static inline bool ir3_instr_check_mark(struct ir3_instruction *instr)
462 {
463 if (instr->flags & IR3_INSTR_MARK)
464 return true; /* already visited */
465 instr->flags |= IR3_INSTR_MARK;
466 return false;
467 }
468
469 void ir3_block_clear_mark(struct ir3_block *block);
470 void ir3_clear_mark(struct ir3 *shader);
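/* A rough usage sketch (illustrative, not part of the actual interface):
 * the mark flag supports a simple visit-once traversal over the SSA
 * graph, assuming marks were cleared beforehand with ir3_clear_mark().
 * The helper below is hypothetical; foreach_ssa_src() is defined further
 * down in this header:
 *
 *    static void visit(struct ir3_instruction *instr)
 *    {
 *        struct ir3_instruction *src;
 *        if (ir3_instr_check_mark(instr))
 *            return;            // already visited
 *        foreach_ssa_src(src, instr)
 *            visit(src);        // recurse into SSA sources first
 *        // ... per-instruction work goes here ...
 *    }
 */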
471
472 unsigned ir3_count_instructions(struct ir3 *ir);
473
474 static inline int ir3_instr_regno(struct ir3_instruction *instr,
475 struct ir3_register *reg)
476 {
477 unsigned i;
478 for (i = 0; i < instr->regs_count; i++)
479 if (reg == instr->regs[i])
480 return i;
481 return -1;
482 }
483
484
485 #define MAX_ARRAYS 16
486
487 /* comp:
488 * 0 - x
489 * 1 - y
490 * 2 - z
491 * 3 - w
492 */
493 static inline uint32_t regid(int num, int comp)
494 {
495 return (num << 2) | (comp & 0x3);
496 }
497
498 static inline uint32_t reg_num(struct ir3_register *reg)
499 {
500 return reg->num >> 2;
501 }
502
503 static inline uint32_t reg_comp(struct ir3_register *reg)
504 {
505 return reg->num & 0x3;
506 }
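/* Worked example of the encoding above (illustrative): r2.w is register
 * number 2, component 3 (w), so regid(2, 3) == (2 << 2) | 3 == 11, and
 * reg_num()/reg_comp() recover the 2 and the 3 from a packed
 * ir3_register::num.
 */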
507
508 static inline bool is_flow(struct ir3_instruction *instr)
509 {
510 return (opc_cat(instr->opc) == 0);
511 }
512
513 static inline bool is_kill(struct ir3_instruction *instr)
514 {
515 return instr->opc == OPC_KILL;
516 }
517
518 static inline bool is_nop(struct ir3_instruction *instr)
519 {
520 return instr->opc == OPC_NOP;
521 }
522
523 /* Is it a non-transformative (ie. not type changing) mov? This can
524 * also include absneg.s/absneg.f, which for the most part can be
525 * treated as a mov (single src argument).
526 */
527 static inline bool is_same_type_mov(struct ir3_instruction *instr)
528 {
529 struct ir3_register *dst = instr->regs[0];
530
531 /* mov's that write to a0.x or p0.x are special: */
532 if (dst->num == regid(REG_P0, 0))
533 return false;
534 if (dst->num == regid(REG_A0, 0))
535 return false;
536
537 if (dst->flags & (IR3_REG_RELATIV | IR3_REG_ARRAY))
538 return false;
539
540 switch (instr->opc) {
541 case OPC_MOV:
542 return instr->cat1.src_type == instr->cat1.dst_type;
543 case OPC_ABSNEG_F:
544 case OPC_ABSNEG_S:
545 return true;
546 default:
547 return false;
548 }
549 }
550
551 static inline bool is_alu(struct ir3_instruction *instr)
552 {
553 return (1 <= opc_cat(instr->opc)) && (opc_cat(instr->opc) <= 3);
554 }
555
556 static inline bool is_sfu(struct ir3_instruction *instr)
557 {
558 return (opc_cat(instr->opc) == 4);
559 }
560
561 static inline bool is_tex(struct ir3_instruction *instr)
562 {
563 return (opc_cat(instr->opc) == 5);
564 }
565
566 static inline bool is_mem(struct ir3_instruction *instr)
567 {
568 return (opc_cat(instr->opc) == 6);
569 }
570
571 static inline bool
572 is_store(struct ir3_instruction *instr)
573 {
574 /* for these instructions, the "destination" register is
575 * actually a source (the address to store to).
576 */
577 switch (instr->opc) {
578 case OPC_STG:
579 case OPC_STP:
580 case OPC_STL:
581 case OPC_STLW:
582 case OPC_L2G:
583 case OPC_G2L:
584 return true;
585 default:
586 return false;
587 }
588 }
589
590 static inline bool is_load(struct ir3_instruction *instr)
591 {
592 switch (instr->opc) {
593 case OPC_LDG:
594 case OPC_LDL:
595 case OPC_LDP:
596 case OPC_L2G:
597 case OPC_LDLW:
598 case OPC_LDC_4:
599 case OPC_LDLV:
600 /* probably some others too.. */
601 return true;
602 default:
603 return false;
604 }
605 }
606
607 static inline bool is_input(struct ir3_instruction *instr)
608 {
609 /* in some cases, ldlv is used to fetch a varying without
610 * interpolation.. fortunately inloc is the first src
611 * register in either case
612 */
613 switch (instr->opc) {
614 case OPC_LDLV:
615 case OPC_BARY_F:
616 return true;
617 default:
618 return false;
619 }
620 }
621
622 static inline bool is_meta(struct ir3_instruction *instr)
623 {
624 /* TODO how should we count PHI (and maybe fan-in/out) which
625 * might actually contribute some instructions to the final
626 * result?
627 */
628 return (opc_cat(instr->opc) == -1);
629 }
630
631 static inline bool writes_addr(struct ir3_instruction *instr)
632 {
633 if (instr->regs_count > 0) {
634 struct ir3_register *dst = instr->regs[0];
635 return reg_num(dst) == REG_A0;
636 }
637 return false;
638 }
639
640 static inline bool writes_pred(struct ir3_instruction *instr)
641 {
642 if (instr->regs_count > 0) {
643 struct ir3_register *dst = instr->regs[0];
644 return reg_num(dst) == REG_P0;
645 }
646 return false;
647 }
648
649 /* returns defining instruction for reg */
650 /* TODO better name */
651 static inline struct ir3_instruction *ssa(struct ir3_register *reg)
652 {
653 if (reg->flags & (IR3_REG_SSA | IR3_REG_ARRAY)) {
654 debug_assert(!(reg->instr && (reg->instr->flags & IR3_INSTR_UNUSED)));
655 return reg->instr;
656 }
657 return NULL;
658 }
659
660 static inline bool conflicts(struct ir3_instruction *a,
661 struct ir3_instruction *b)
662 {
663 return (a && b) && (a != b);
664 }
665
666 static inline bool reg_gpr(struct ir3_register *r)
667 {
668 if (r->flags & (IR3_REG_CONST | IR3_REG_IMMED))
669 return false;
670 if ((reg_num(r) == REG_A0) || (reg_num(r) == REG_P0))
671 return false;
672 return true;
673 }
674
675 static inline type_t half_type(type_t type)
676 {
677 switch (type) {
678 case TYPE_F32: return TYPE_F16;
679 case TYPE_U32: return TYPE_U16;
680 case TYPE_S32: return TYPE_S16;
681 case TYPE_F16:
682 case TYPE_U16:
683 case TYPE_S16:
684 return type;
685 default:
686 assert(0);
687 return ~0;
688 }
689 }
690
691 /* some cat2 instructions (ie. those which are not float) can embed an
692 * immediate:
693 */
694 static inline bool ir3_cat2_int(opc_t opc)
695 {
696 switch (opc) {
697 case OPC_ADD_U:
698 case OPC_ADD_S:
699 case OPC_SUB_U:
700 case OPC_SUB_S:
701 case OPC_CMPS_U:
702 case OPC_CMPS_S:
703 case OPC_MIN_U:
704 case OPC_MIN_S:
705 case OPC_MAX_U:
706 case OPC_MAX_S:
707 case OPC_CMPV_U:
708 case OPC_CMPV_S:
709 case OPC_MUL_U:
710 case OPC_MUL_S:
711 case OPC_MULL_U:
712 case OPC_CLZ_S:
713 case OPC_ABSNEG_S:
714 case OPC_AND_B:
715 case OPC_OR_B:
716 case OPC_NOT_B:
717 case OPC_XOR_B:
718 case OPC_BFREV_B:
719 case OPC_CLZ_B:
720 case OPC_SHL_B:
721 case OPC_SHR_B:
722 case OPC_ASHR_B:
723 case OPC_MGEN_B:
724 case OPC_GETBIT_B:
725 case OPC_CBITS_B:
726 case OPC_BARY_F:
727 return true;
728
729 default:
730 return false;
731 }
732 }
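/* A rough usage sketch (illustrative; 'instr' and 'src' are hypothetical):
 * a pass such as copy-propagation might consult ir3_cat2_int() before
 * folding an immediate directly into a cat2 source:
 *
 *    if ((src->flags & IR3_REG_IMMED) && ir3_cat2_int(instr->opc)) {
 *        // ok to keep the immediate inline, ie. add.u r0.x, r0.y, 7
 *    }
 */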
733
734
735 /* map cat2 instruction to valid abs/neg flags: */
736 static inline unsigned ir3_cat2_absneg(opc_t opc)
737 {
738 switch (opc) {
739 case OPC_ADD_F:
740 case OPC_MIN_F:
741 case OPC_MAX_F:
742 case OPC_MUL_F:
743 case OPC_SIGN_F:
744 case OPC_CMPS_F:
745 case OPC_ABSNEG_F:
746 case OPC_CMPV_F:
747 case OPC_FLOOR_F:
748 case OPC_CEIL_F:
749 case OPC_RNDNE_F:
750 case OPC_RNDAZ_F:
751 case OPC_TRUNC_F:
752 case OPC_BARY_F:
753 return IR3_REG_FABS | IR3_REG_FNEG;
754
755 case OPC_ADD_U:
756 case OPC_ADD_S:
757 case OPC_SUB_U:
758 case OPC_SUB_S:
759 case OPC_CMPS_U:
760 case OPC_CMPS_S:
761 case OPC_MIN_U:
762 case OPC_MIN_S:
763 case OPC_MAX_U:
764 case OPC_MAX_S:
765 case OPC_CMPV_U:
766 case OPC_CMPV_S:
767 case OPC_MUL_U:
768 case OPC_MUL_S:
769 case OPC_MULL_U:
770 case OPC_CLZ_S:
771 return 0;
772
773 case OPC_ABSNEG_S:
774 return IR3_REG_SABS | IR3_REG_SNEG;
775
776 case OPC_AND_B:
777 case OPC_OR_B:
778 case OPC_NOT_B:
779 case OPC_XOR_B:
780 case OPC_BFREV_B:
781 case OPC_CLZ_B:
782 case OPC_SHL_B:
783 case OPC_SHR_B:
784 case OPC_ASHR_B:
785 case OPC_MGEN_B:
786 case OPC_GETBIT_B:
787 case OPC_CBITS_B:
788 return IR3_REG_BNOT;
789
790 default:
791 return 0;
792 }
793 }
794
795 /* map cat3 instructions to valid abs/neg flags: */
796 static inline unsigned ir3_cat3_absneg(opc_t opc)
797 {
798 switch (opc) {
799 case OPC_MAD_F16:
800 case OPC_MAD_F32:
801 case OPC_SEL_F16:
802 case OPC_SEL_F32:
803 return IR3_REG_FNEG;
804
805 case OPC_MAD_U16:
806 case OPC_MADSH_U16:
807 case OPC_MAD_S16:
808 case OPC_MADSH_M16:
809 case OPC_MAD_U24:
810 case OPC_MAD_S24:
811 case OPC_SEL_S16:
812 case OPC_SEL_S32:
813 case OPC_SAD_S16:
814 case OPC_SAD_S32:
815 /* neg *may* work on 3rd src.. */
816
817 case OPC_SEL_B16:
818 case OPC_SEL_B32:
819
820 default:
821 return 0;
822 }
823 }
824
825 #define array_insert(arr, val) do { \
826 if (arr ## _count == arr ## _sz) { \
827 arr ## _sz = MAX2(2 * arr ## _sz, 16); \
828 arr = realloc(arr, arr ## _sz * sizeof(arr[0])); \
829 } \
830 arr[arr ##_count++] = val; \
831 } while (0)
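/* Example usage (illustrative; 'kill_instr' is hypothetical): the macro
 * pairs a pointer 'foo' with 'foo_count'/'foo_sz' fields and grows the
 * backing storage as needed, e.g. appending to the shader's keeps list
 * declared above:
 *
 *    array_insert(ir->keeps, kill_instr);   // bumps ir->keeps_count and
 *                                           // grows ir->keeps_sz if needed
 */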
832
833 /* iterator for an instruction's sources (reg), also returns src #: */
834 #define foreach_src_n(__srcreg, __n, __instr) \
835 if ((__instr)->regs_count) \
836 for (unsigned __cnt = (__instr)->regs_count - 1, __n = 0; __n < __cnt; __n++) \
837 if ((__srcreg = (__instr)->regs[__n + 1]))
838
839 /* iterator for an instruction's sources (reg): */
840 #define foreach_src(__srcreg, __instr) \
841 foreach_src_n(__srcreg, __i, __instr)
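/* Example usage (illustrative; 'instr' is a hypothetical instruction):
 * counting how many of its sources are immediates:
 *
 *    struct ir3_register *reg;
 *    unsigned nimmed = 0;
 *    foreach_src(reg, instr) {
 *        if (reg->flags & IR3_REG_IMMED)
 *            nimmed++;
 *    }
 */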
842
843 static inline unsigned __ssa_src_cnt(struct ir3_instruction *instr)
844 {
845 if (instr->address)
846 return instr->regs_count + 1;
847 return instr->regs_count;
848 }
849
850 static inline struct ir3_instruction * __ssa_src_n(struct ir3_instruction *instr, unsigned n)
851 {
852 if (n == (instr->regs_count + 0))
853 return instr->address;
854 return ssa(instr->regs[n]);
855 }
856
857 #define __src_cnt(__instr) ((__instr)->address ? (__instr)->regs_count : (__instr)->regs_count - 1)
858
859 /* iterator for an instruction's SSA sources (instr), also returns src #: */
860 #define foreach_ssa_src_n(__srcinst, __n, __instr) \
861 if ((__instr)->regs_count) \
862 for (unsigned __cnt = __ssa_src_cnt(__instr), __n = 0; __n < __cnt; __n++) \
863 if ((__srcinst = __ssa_src_n(__instr, __n)))
864
865 /* iterator for an instruction's SSA sources (instr): */
866 #define foreach_ssa_src(__srcinst, __instr) \
867 foreach_ssa_src_n(__srcinst, __i, __instr)
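/* Example usage (illustrative; 'instr' is hypothetical): unlike
 * foreach_src(), which yields ir3_register pointers, the SSA iterators
 * yield the producing instructions, and also visit the implicit
 * address-register dependency (instr->address) as the last source:
 *
 *    struct ir3_instruction *src;
 *    foreach_ssa_src(src, instr) {
 *        // 'src' is a producer this instruction depends on, which is
 *        // what e.g. the depth and scheduling passes care about
 *    }
 */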
868
869
870 /* dump: */
871 void ir3_print(struct ir3 *ir);
872 void ir3_print_instr(struct ir3_instruction *instr);
873
874 /* depth calculation: */
875 int ir3_delayslots(struct ir3_instruction *assigner,
876 struct ir3_instruction *consumer, unsigned n);
877 void ir3_insert_by_depth(struct ir3_instruction *instr, struct list_head *list);
878 void ir3_depth(struct ir3 *ir);
879
880 /* copy-propagate: */
881 void ir3_cp(struct ir3 *ir);
882
883 /* group neighbors and insert mov's to resolve conflicts: */
884 void ir3_group(struct ir3 *ir);
885
886 /* scheduling: */
887 int ir3_sched(struct ir3 *ir);
888
889 /* register assignment: */
890 struct ir3_ra_reg_set * ir3_ra_alloc_reg_set(void *memctx);
891 int ir3_ra(struct ir3 *ir3, enum shader_t type,
892 bool frag_coord, bool frag_face);
893
894 /* legalize: */
895 void ir3_legalize(struct ir3 *ir, bool *has_samp, int *max_bary);
896
897 /* ************************************************************************* */
898 /* instruction helpers */
899
900 static inline struct ir3_instruction *
901 ir3_MOV(struct ir3_block *block, struct ir3_instruction *src, type_t type)
902 {
903 struct ir3_instruction *instr =
904 ir3_instr_create(block, 1, OPC_MOV);
905 ir3_reg_create(instr, 0, 0); /* dst */
906 if (src->regs[0]->flags & IR3_REG_ARRAY) {
907 struct ir3_register *src_reg =
908 ir3_reg_create(instr, 0, IR3_REG_ARRAY);
909 src_reg->array = src->regs[0]->array;
910 src_reg->instr = src;
911 } else {
912 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
913 }
914 debug_assert(!(src->regs[0]->flags & IR3_REG_RELATIV));
915 instr->cat1.src_type = type;
916 instr->cat1.dst_type = type;
917 return instr;
918 }
919
920 static inline struct ir3_instruction *
921 ir3_COV(struct ir3_block *block, struct ir3_instruction *src,
922 type_t src_type, type_t dst_type)
923 {
924 struct ir3_instruction *instr =
925 ir3_instr_create(block, 1, OPC_MOV);
926 ir3_reg_create(instr, 0, 0); /* dst */
927 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
928 instr->cat1.src_type = src_type;
929 instr->cat1.dst_type = dst_type;
930 debug_assert(!(src->regs[0]->flags & IR3_REG_ARRAY));
931 return instr;
932 }
933
934 static inline struct ir3_instruction *
935 ir3_NOP(struct ir3_block *block)
936 {
937 return ir3_instr_create(block, 0, OPC_NOP);
938 }
939
940 #define INSTR0(CAT, name) \
941 static inline struct ir3_instruction * \
942 ir3_##name(struct ir3_block *block) \
943 { \
944 struct ir3_instruction *instr = \
945 ir3_instr_create(block, CAT, OPC_##name); \
946 return instr; \
947 }
948
949 #define INSTR1(CAT, name) \
950 static inline struct ir3_instruction * \
951 ir3_##name(struct ir3_block *block, \
952 struct ir3_instruction *a, unsigned aflags) \
953 { \
954 struct ir3_instruction *instr = \
955 ir3_instr_create(block, CAT, OPC_##name); \
956 ir3_reg_create(instr, 0, 0); /* dst */ \
957 ir3_reg_create(instr, 0, IR3_REG_SSA | aflags)->instr = a; \
958 return instr; \
959 }
960
961 #define INSTR2(CAT, name) \
962 static inline struct ir3_instruction * \
963 ir3_##name(struct ir3_block *block, \
964 struct ir3_instruction *a, unsigned aflags, \
965 struct ir3_instruction *b, unsigned bflags) \
966 { \
967 struct ir3_instruction *instr = \
968 ir3_instr_create(block, CAT, OPC_##name); \
969 ir3_reg_create(instr, 0, 0); /* dst */ \
970 ir3_reg_create(instr, 0, IR3_REG_SSA | aflags)->instr = a; \
971 ir3_reg_create(instr, 0, IR3_REG_SSA | bflags)->instr = b; \
972 return instr; \
973 }
974
975 #define INSTR3(CAT, name) \
976 static inline struct ir3_instruction * \
977 ir3_##name(struct ir3_block *block, \
978 struct ir3_instruction *a, unsigned aflags, \
979 struct ir3_instruction *b, unsigned bflags, \
980 struct ir3_instruction *c, unsigned cflags) \
981 { \
982 struct ir3_instruction *instr = \
983 ir3_instr_create(block, CAT, OPC_##name); \
984 ir3_reg_create(instr, 0, 0); /* dst */ \
985 ir3_reg_create(instr, 0, IR3_REG_SSA | aflags)->instr = a; \
986 ir3_reg_create(instr, 0, IR3_REG_SSA | bflags)->instr = b; \
987 ir3_reg_create(instr, 0, IR3_REG_SSA | cflags)->instr = c; \
988 return instr; \
989 }
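/* Note (illustrative; 'a' and 'b' are hypothetical previously built
 * instructions): each INSTRn(CAT, name) invocation below generates a
 * small builder, so e.g. INSTR2(2, ADD_F) produces ir3_ADD_F(), used
 * roughly as:
 *
 *    struct ir3_instruction *sum =
 *            ir3_ADD_F(block, a, 0, b, IR3_REG_FNEG);   // a + (-b)
 *
 * where the flag arguments are per-source modifiers such as
 * IR3_REG_FNEG/IR3_REG_FABS.
 */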
990
991 /* cat0 instructions: */
992 INSTR0(0, BR);
993 INSTR0(0, JUMP);
994 INSTR1(0, KILL);
995 INSTR0(0, END);
996
997 /* cat2 instructions, most 2 src but some 1 src: */
998 INSTR2(2, ADD_F)
999 INSTR2(2, MIN_F)
1000 INSTR2(2, MAX_F)
1001 INSTR2(2, MUL_F)
1002 INSTR1(2, SIGN_F)
1003 INSTR2(2, CMPS_F)
1004 INSTR1(2, ABSNEG_F)
1005 INSTR2(2, CMPV_F)
1006 INSTR1(2, FLOOR_F)
1007 INSTR1(2, CEIL_F)
1008 INSTR1(2, RNDNE_F)
1009 INSTR1(2, RNDAZ_F)
1010 INSTR1(2, TRUNC_F)
1011 INSTR2(2, ADD_U)
1012 INSTR2(2, ADD_S)
1013 INSTR2(2, SUB_U)
1014 INSTR2(2, SUB_S)
1015 INSTR2(2, CMPS_U)
1016 INSTR2(2, CMPS_S)
1017 INSTR2(2, MIN_U)
1018 INSTR2(2, MIN_S)
1019 INSTR2(2, MAX_U)
1020 INSTR2(2, MAX_S)
1021 INSTR1(2, ABSNEG_S)
1022 INSTR2(2, AND_B)
1023 INSTR2(2, OR_B)
1024 INSTR1(2, NOT_B)
1025 INSTR2(2, XOR_B)
1026 INSTR2(2, CMPV_U)
1027 INSTR2(2, CMPV_S)
1028 INSTR2(2, MUL_U)
1029 INSTR2(2, MUL_S)
1030 INSTR2(2, MULL_U)
1031 INSTR1(2, BFREV_B)
1032 INSTR1(2, CLZ_S)
1033 INSTR1(2, CLZ_B)
1034 INSTR2(2, SHL_B)
1035 INSTR2(2, SHR_B)
1036 INSTR2(2, ASHR_B)
1037 INSTR2(2, BARY_F)
1038 INSTR2(2, MGEN_B)
1039 INSTR2(2, GETBIT_B)
1040 INSTR1(2, SETRM)
1041 INSTR1(2, CBITS_B)
1042 INSTR2(2, SHB)
1043 INSTR2(2, MSAD)
1044
1045 /* cat3 instructions: */
1046 INSTR3(3, MAD_U16)
1047 INSTR3(3, MADSH_U16)
1048 INSTR3(3, MAD_S16)
1049 INSTR3(3, MADSH_M16)
1050 INSTR3(3, MAD_U24)
1051 INSTR3(3, MAD_S24)
1052 INSTR3(3, MAD_F16)
1053 INSTR3(3, MAD_F32)
1054 INSTR3(3, SEL_B16)
1055 INSTR3(3, SEL_B32)
1056 INSTR3(3, SEL_S16)
1057 INSTR3(3, SEL_S32)
1058 INSTR3(3, SEL_F16)
1059 INSTR3(3, SEL_F32)
1060 INSTR3(3, SAD_S16)
1061 INSTR3(3, SAD_S32)
1062
1063 /* cat4 instructions: */
1064 INSTR1(4, RCP)
1065 INSTR1(4, RSQ)
1066 INSTR1(4, LOG2)
1067 INSTR1(4, EXP2)
1068 INSTR1(4, SIN)
1069 INSTR1(4, COS)
1070 INSTR1(4, SQRT)
1071
1072 /* cat5 instructions: */
1073 INSTR1(5, DSX)
1074 INSTR1(5, DSY)
1075
1076 static inline struct ir3_instruction *
1077 ir3_SAM(struct ir3_block *block, opc_t opc, type_t type,
1078 unsigned wrmask, unsigned flags, unsigned samp, unsigned tex,
1079 struct ir3_instruction *src0, struct ir3_instruction *src1)
1080 {
1081 struct ir3_instruction *sam;
1082 struct ir3_register *reg;
1083
1084 sam = ir3_instr_create(block, 5, opc);
1085 sam->flags |= flags;
1086 ir3_reg_create(sam, 0, 0)->wrmask = wrmask;
1087 if (src0) {
1088 reg = ir3_reg_create(sam, 0, IR3_REG_SSA);
1089 reg->wrmask = (1 << (src0->regs_count - 1)) - 1;
1090 reg->instr = src0;
1091 }
1092 if (src1) {
1093 reg = ir3_reg_create(sam, 0, IR3_REG_SSA);
1094 reg->instr = src1;
1095 reg->wrmask = (1 << (src1->regs_count - 1)) - 1;
1096 }
1097 sam->cat5.samp = samp;
1098 sam->cat5.tex = tex;
1099 sam->cat5.type = type;
1100
1101 return sam;
1102 }
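/* A rough usage sketch (illustrative; 'coord', 'samp_id' and 'tex_id' are
 * hypothetical): a frontend might emit a simple texture fetch along these
 * lines, with 'coord' being a previously built fanin of the coordinates:
 *
 *    struct ir3_instruction *tex =
 *            ir3_SAM(block, OPC_SAM, TYPE_F32, 0xf, 0,
 *                    samp_id, tex_id, coord, NULL);
 *
 * The wrmask 0xf requests all four components of the result.
 */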
1103
1104 /* cat6 instructions: */
1105 INSTR2(6, LDLV)
1106 INSTR2(6, LDG)
1107 INSTR3(6, STG)
1108
1109 /* ************************************************************************* */
1110 /* split this out or find some helper to use.. like main/bitset.h.. */
1111
1112 #include <string.h>
1113
1114 #define MAX_REG 256
1115
1116 typedef uint8_t regmask_t[2 * MAX_REG / 8];
1117
1118 static inline unsigned regmask_idx(struct ir3_register *reg)
1119 {
1120 unsigned num = (reg->flags & IR3_REG_RELATIV) ? reg->array.offset : reg->num;
1121 debug_assert(num < MAX_REG);
1122 if (reg->flags & IR3_REG_HALF)
1123 num += MAX_REG;
1124 return num;
1125 }
1126
1127 static inline void regmask_init(regmask_t *regmask)
1128 {
1129 memset(regmask, 0, sizeof(*regmask));
1130 }
1131
1132 static inline void regmask_set(regmask_t *regmask, struct ir3_register *reg)
1133 {
1134 unsigned idx = regmask_idx(reg);
1135 if (reg->flags & IR3_REG_RELATIV) {
1136 unsigned i;
1137 for (i = 0; i < reg->size; i++, idx++)
1138 (*regmask)[idx / 8] |= 1 << (idx % 8);
1139 } else {
1140 unsigned mask;
1141 for (mask = reg->wrmask; mask; mask >>= 1, idx++)
1142 if (mask & 1)
1143 (*regmask)[idx / 8] |= 1 << (idx % 8);
1144 }
1145 }
1146
1147 static inline void regmask_or(regmask_t *dst, regmask_t *a, regmask_t *b)
1148 {
1149 unsigned i;
1150 for (i = 0; i < ARRAY_SIZE(*dst); i++)
1151 (*dst)[i] = (*a)[i] | (*b)[i];
1152 }
1153
1154 /* set bits in a if not set in b, conceptually:
1155 * a |= (reg & ~b)
1156 */
1157 static inline void regmask_set_if_not(regmask_t *a,
1158 struct ir3_register *reg, regmask_t *b)
1159 {
1160 unsigned idx = regmask_idx(reg);
1161 if (reg->flags & IR3_REG_RELATIV) {
1162 unsigned i;
1163 for (i = 0; i < reg->size; i++, idx++)
1164 if (!((*b)[idx / 8] & (1 << (idx % 8))))
1165 (*a)[idx / 8] |= 1 << (idx % 8);
1166 } else {
1167 unsigned mask;
1168 for (mask = reg->wrmask; mask; mask >>= 1, idx++)
1169 if (mask & 1)
1170 if (!((*b)[idx / 8] & (1 << (idx % 8))))
1171 (*a)[idx / 8] |= 1 << (idx % 8);
1172 }
1173 }
1174
1175 static inline bool regmask_get(regmask_t *regmask,
1176 struct ir3_register *reg)
1177 {
1178 unsigned idx = regmask_idx(reg);
1179 if (reg->flags & IR3_REG_RELATIV) {
1180 unsigned i;
1181 for (i = 0; i < reg->size; i++, idx++)
1182 if ((*regmask)[idx / 8] & (1 << (idx % 8)))
1183 return true;
1184 } else {
1185 unsigned mask;
1186 for (mask = reg->wrmask; mask; mask >>= 1, idx++)
1187 if (mask & 1)
1188 if ((*regmask)[idx / 8] & (1 << (idx % 8)))
1189 return true;
1190 }
1191 return false;
1192 }
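/* A rough usage sketch (illustrative; 'instr' is hypothetical): a pass
 * like legalize can use regmasks to track outstanding writes.  A
 * simplified (ss)-style pattern might look like:
 *
 *    regmask_t needs_ss;
 *    struct ir3_register *reg;
 *    regmask_init(&needs_ss);
 *    ...
 *    foreach_src(reg, instr) {
 *        if (regmask_get(&needs_ss, reg))
 *            instr->flags |= IR3_INSTR_SS;        // consumer must sync
 *    }
 *    if (is_sfu(instr))
 *        regmask_set(&needs_ss, instr->regs[0]);  // track the new write
 */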
1193
1194 /* ************************************************************************* */
1195
1196 #endif /* IR3_H_ */