freedreno/ir3: make foreach_ssa_src declare cursor ptr
[mesa.git] / src / freedreno / ir3 / ir3.c
1 /*
2 * Copyright (c) 2012 Rob Clark <robdclark@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "ir3.h"
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <string.h>
29 #include <assert.h>
30 #include <stdbool.h>
31 #include <errno.h>
32
33 #include "util/bitscan.h"
34 #include "util/ralloc.h"
35 #include "util/u_math.h"
36
37 #include "instr-a3xx.h"
38 #include "ir3_compiler.h"
39
40 /* simple allocator to carve allocations out of an up-front allocated heap,
41 * so that we can free everything easily in one shot.
42 */
43 void * ir3_alloc(struct ir3 *shader, int sz)
44 {
45 return rzalloc_size(shader, sz); /* TODO: don't use rzalloc */
46 }
47
48 struct ir3 * ir3_create(struct ir3_compiler *compiler, gl_shader_stage type)
49 {
50 struct ir3 *shader = rzalloc(NULL, struct ir3);
51
52 shader->compiler = compiler;
53 shader->type = type;
54
55 list_inithead(&shader->block_list);
56 list_inithead(&shader->array_list);
57
58 return shader;
59 }
60
61 void ir3_destroy(struct ir3 *shader)
62 {
63 ralloc_free(shader);
64 }
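/* A hedged usage sketch of the allocation model above: everything
 * allocated with ir3_alloc() is parented to the ir3, so it is all freed
 * in one shot by ir3_destroy().  'struct foo' here is just a hypothetical
 * payload type:
 *
 *    struct ir3 *ir = ir3_create(compiler, MESA_SHADER_FRAGMENT);
 *    struct foo *f = ir3_alloc(ir, sizeof(*f));   // hypothetical payload
 *    ...
 *    ir3_destroy(ir);   // frees 'f' and everything else in one call
 */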
65
66 #define iassert(cond) do { \
67 if (!(cond)) { \
68 debug_assert(cond); \
69 return -1; \
70 } } while (0)
71
72 #define iassert_type(reg, full) do { \
73 if ((full)) { \
74 iassert(!((reg)->flags & IR3_REG_HALF)); \
75 } else { \
76 iassert((reg)->flags & IR3_REG_HALF); \
77 } } while (0);
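/* Note: unlike a plain assert(), iassert() both debug_assert()s and makes
 * the emit_catN() function return -1, which ir3_assemble() turns into a
 * NULL return, so a malformed instruction fails gracefully in release
 * builds instead of being written out with a bogus encoding.
 */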
78
79 static uint32_t reg(struct ir3_register *reg, struct ir3_info *info,
80 uint32_t repeat, uint32_t valid_flags)
81 {
82 reg_t val = { .dummy32 = 0 };
83
84 if (reg->flags & ~valid_flags) {
85 debug_printf("INVALID FLAGS: %x vs %x\n",
86 reg->flags, valid_flags);
87 }
88
89 if (!(reg->flags & IR3_REG_R))
90 repeat = 0;
91
92 if (reg->flags & IR3_REG_IMMED) {
93 val.iim_val = reg->iim_val;
94 } else {
95 unsigned components;
96 int16_t max;
97
98 if (reg->flags & IR3_REG_RELATIV) {
99 components = reg->size;
100 val.idummy10 = reg->array.offset;
101 max = (reg->array.offset + repeat + components - 1);
102 } else {
103 components = util_last_bit(reg->wrmask);
104 val.comp = reg->num & 0x3;
105 val.num = reg->num >> 2;
106 max = (reg->num + repeat + components - 1);
107 }
108
109 if (reg->flags & IR3_REG_CONST) {
110 info->max_const = MAX2(info->max_const, max >> 2);
111 } else if (val.num == 63) {
112 /* ignore writes to dummy register r63.x */
113 } else if (max < regid(48, 0)) {
114 if (reg->flags & IR3_REG_HALF) {
115 if (info->gpu_id >= 600) {
116 /* starting w/ a6xx, half regs conflict with full regs: */
117 info->max_reg = MAX2(info->max_reg, max >> 3);
118 } else {
119 info->max_half_reg = MAX2(info->max_half_reg, max >> 2);
120 }
121 } else {
122 info->max_reg = MAX2(info->max_reg, max >> 2);
123 }
124 }
125 }
126
127 return val.dummy32;
128 }
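/* Worked example (a sketch, assuming the usual regid(r, c) = r*4 + c
 * packing): a full source register r2.w with wrmask=0x1 and repeat=0 has
 * num = regid(2, 3) = 11, so val.comp = 3 and val.num = 2; max works out
 * to 11, which bumps info->max_reg to at least (11 >> 2) = 2.
 */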
129
130 static int emit_cat0(struct ir3_instruction *instr, void *ptr,
131 struct ir3_info *info)
132 {
133 instr_cat0_t *cat0 = ptr;
134
135 if (info->gpu_id >= 500) {
136 cat0->a5xx.immed = instr->cat0.immed;
137 } else if (info->gpu_id >= 400) {
138 cat0->a4xx.immed = instr->cat0.immed;
139 } else {
140 cat0->a3xx.immed = instr->cat0.immed;
141 }
142 cat0->repeat = instr->repeat;
143 cat0->ss = !!(instr->flags & IR3_INSTR_SS);
144 cat0->inv0 = instr->cat0.inv;
145 cat0->comp0 = instr->cat0.comp;
146 cat0->opc = instr->opc;
147 cat0->opc_hi = instr->opc >= 16;
148 cat0->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
149 cat0->sync = !!(instr->flags & IR3_INSTR_SY);
150 cat0->opc_cat = 0;
151
152 return 0;
153 }
154
155 static int emit_cat1(struct ir3_instruction *instr, void *ptr,
156 struct ir3_info *info)
157 {
158 struct ir3_register *dst = instr->regs[0];
159 struct ir3_register *src = instr->regs[1];
160 instr_cat1_t *cat1 = ptr;
161
162 iassert(instr->regs_count == 2);
163 iassert_type(dst, type_size(instr->cat1.dst_type) == 32);
164 if (!(src->flags & IR3_REG_IMMED))
165 iassert_type(src, type_size(instr->cat1.src_type) == 32);
166
167 if (src->flags & IR3_REG_IMMED) {
168 cat1->iim_val = src->iim_val;
169 cat1->src_im = 1;
170 } else if (src->flags & IR3_REG_RELATIV) {
171 cat1->off = reg(src, info, instr->repeat,
172 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF | IR3_REG_RELATIV);
173 cat1->src_rel = 1;
174 cat1->src_rel_c = !!(src->flags & IR3_REG_CONST);
175 } else {
176 cat1->src = reg(src, info, instr->repeat,
177 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF);
178 cat1->src_c = !!(src->flags & IR3_REG_CONST);
179 }
180
181 cat1->dst = reg(dst, info, instr->repeat,
182 IR3_REG_RELATIV | IR3_REG_EVEN |
183 IR3_REG_R | IR3_REG_POS_INF | IR3_REG_HALF);
184 cat1->repeat = instr->repeat;
185 cat1->src_r = !!(src->flags & IR3_REG_R);
186 cat1->ss = !!(instr->flags & IR3_INSTR_SS);
187 cat1->ul = !!(instr->flags & IR3_INSTR_UL);
188 cat1->dst_type = instr->cat1.dst_type;
189 cat1->dst_rel = !!(dst->flags & IR3_REG_RELATIV);
190 cat1->src_type = instr->cat1.src_type;
191 cat1->even = !!(dst->flags & IR3_REG_EVEN);
192 cat1->pos_inf = !!(dst->flags & IR3_REG_POS_INF);
193 cat1->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
194 cat1->sync = !!(instr->flags & IR3_INSTR_SY);
195 cat1->opc_cat = 1;
196
197 return 0;
198 }
199
200 static int emit_cat2(struct ir3_instruction *instr, void *ptr,
201 struct ir3_info *info)
202 {
203 struct ir3_register *dst = instr->regs[0];
204 struct ir3_register *src1 = instr->regs[1];
205 struct ir3_register *src2 = instr->regs[2];
206 instr_cat2_t *cat2 = ptr;
207 unsigned absneg = ir3_cat2_absneg(instr->opc);
208
209 iassert((instr->regs_count == 2) || (instr->regs_count == 3));
210
211 if (instr->nop) {
212 iassert(!instr->repeat);
213 iassert(instr->nop <= 3);
214
215 cat2->src1_r = instr->nop & 0x1;
216 cat2->src2_r = (instr->nop >> 1) & 0x1;
217 } else {
218 cat2->src1_r = !!(src1->flags & IR3_REG_R);
219 if (src2)
220 cat2->src2_r = !!(src2->flags & IR3_REG_R);
221 }
222
223 if (src1->flags & IR3_REG_RELATIV) {
224 iassert(src1->array.offset < (1 << 10));
225 cat2->rel1.src1 = reg(src1, info, instr->repeat,
226 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
227 IR3_REG_HALF | absneg);
228 cat2->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
229 cat2->rel1.src1_rel = 1;
230 } else if (src1->flags & IR3_REG_CONST) {
231 iassert(src1->num < (1 << 12));
232 cat2->c1.src1 = reg(src1, info, instr->repeat,
233 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
234 absneg);
235 cat2->c1.src1_c = 1;
236 } else {
237 iassert(src1->num < (1 << 11));
238 cat2->src1 = reg(src1, info, instr->repeat,
239 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
240 absneg);
241 }
242 cat2->src1_im = !!(src1->flags & IR3_REG_IMMED);
243 cat2->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
244 cat2->src1_abs = !!(src1->flags & (IR3_REG_FABS | IR3_REG_SABS));
245
246 if (src2) {
247 iassert((src2->flags & IR3_REG_IMMED) ||
248 !((src1->flags ^ src2->flags) & IR3_REG_HALF));
249
250 if (src2->flags & IR3_REG_RELATIV) {
251 iassert(src2->array.offset < (1 << 10));
252 cat2->rel2.src2 = reg(src2, info, instr->repeat,
253 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
254 IR3_REG_HALF | absneg);
255 cat2->rel2.src2_c = !!(src2->flags & IR3_REG_CONST);
256 cat2->rel2.src2_rel = 1;
257 } else if (src2->flags & IR3_REG_CONST) {
258 iassert(src2->num < (1 << 12));
259 cat2->c2.src2 = reg(src2, info, instr->repeat,
260 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
261 absneg);
262 cat2->c2.src2_c = 1;
263 } else {
264 iassert(src2->num < (1 << 11));
265 cat2->src2 = reg(src2, info, instr->repeat,
266 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
267 absneg);
268 }
269
270 cat2->src2_im = !!(src2->flags & IR3_REG_IMMED);
271 cat2->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
272 cat2->src2_abs = !!(src2->flags & (IR3_REG_FABS | IR3_REG_SABS));
273 }
274
275 cat2->dst = reg(dst, info, instr->repeat,
276 IR3_REG_R | IR3_REG_EI | IR3_REG_HALF);
277 cat2->repeat = instr->repeat;
278 cat2->sat = !!(instr->flags & IR3_INSTR_SAT);
279 cat2->ss = !!(instr->flags & IR3_INSTR_SS);
280 cat2->ul = !!(instr->flags & IR3_INSTR_UL);
281 cat2->dst_half = !!((src1->flags ^ dst->flags) & IR3_REG_HALF);
282 cat2->ei = !!(dst->flags & IR3_REG_EI);
283 cat2->cond = instr->cat2.condition;
284 cat2->full = ! (src1->flags & IR3_REG_HALF);
285 cat2->opc = instr->opc;
286 cat2->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
287 cat2->sync = !!(instr->flags & IR3_INSTR_SY);
288 cat2->opc_cat = 2;
289
290 return 0;
291 }
292
293 static int emit_cat3(struct ir3_instruction *instr, void *ptr,
294 struct ir3_info *info)
295 {
296 struct ir3_register *dst = instr->regs[0];
297 struct ir3_register *src1 = instr->regs[1];
298 struct ir3_register *src2 = instr->regs[2];
299 struct ir3_register *src3 = instr->regs[3];
300 unsigned absneg = ir3_cat3_absneg(instr->opc);
301 instr_cat3_t *cat3 = ptr;
302 uint32_t src_flags = 0;
303
304 switch (instr->opc) {
305 case OPC_MAD_F16:
306 case OPC_MAD_U16:
307 case OPC_MAD_S16:
308 case OPC_SEL_B16:
309 case OPC_SEL_S16:
310 case OPC_SEL_F16:
311 case OPC_SAD_S16:
312 case OPC_SAD_S32: // really??
313 src_flags |= IR3_REG_HALF;
314 break;
315 default:
316 break;
317 }
318
319 iassert(instr->regs_count == 4);
320 iassert(!((src1->flags ^ src_flags) & IR3_REG_HALF));
321 iassert(!((src2->flags ^ src_flags) & IR3_REG_HALF));
322 iassert(!((src3->flags ^ src_flags) & IR3_REG_HALF));
323
324 if (instr->nop) {
325 iassert(!instr->repeat);
326 iassert(instr->nop <= 3);
327
328 cat3->src1_r = instr->nop & 0x1;
329 cat3->src2_r = (instr->nop >> 1) & 0x1;
330 } else {
331 cat3->src1_r = !!(src1->flags & IR3_REG_R);
332 cat3->src2_r = !!(src2->flags & IR3_REG_R);
333 }
334
335 if (src1->flags & IR3_REG_RELATIV) {
336 iassert(src1->array.offset < (1 << 10));
337 cat3->rel1.src1 = reg(src1, info, instr->repeat,
338 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
339 IR3_REG_HALF | absneg);
340 cat3->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
341 cat3->rel1.src1_rel = 1;
342 } else if (src1->flags & IR3_REG_CONST) {
343 iassert(src1->num < (1 << 12));
344 cat3->c1.src1 = reg(src1, info, instr->repeat,
345 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
346 cat3->c1.src1_c = 1;
347 } else {
348 iassert(src1->num < (1 << 11));
349 cat3->src1 = reg(src1, info, instr->repeat,
350 IR3_REG_R | IR3_REG_HALF | absneg);
351 }
352
353 cat3->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
354
355 cat3->src2 = reg(src2, info, instr->repeat,
356 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
357 cat3->src2_c = !!(src2->flags & IR3_REG_CONST);
358 cat3->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
359
360 if (src3->flags & IR3_REG_RELATIV) {
361 iassert(src3->array.offset < (1 << 10));
362 cat3->rel2.src3 = reg(src3, info, instr->repeat,
363 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
364 IR3_REG_HALF | absneg);
365 cat3->rel2.src3_c = !!(src3->flags & IR3_REG_CONST);
366 cat3->rel2.src3_rel = 1;
367 } else if (src3->flags & IR3_REG_CONST) {
368 iassert(src3->num < (1 << 12));
369 cat3->c2.src3 = reg(src3, info, instr->repeat,
370 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
371 cat3->c2.src3_c = 1;
372 } else {
373 iassert(src3->num < (1 << 11));
374 cat3->src3 = reg(src3, info, instr->repeat,
375 IR3_REG_R | IR3_REG_HALF | absneg);
376 }
377
378 cat3->src3_neg = !!(src3->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
379 cat3->src3_r = !!(src3->flags & IR3_REG_R);
380
381 cat3->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
382 cat3->repeat = instr->repeat;
383 cat3->sat = !!(instr->flags & IR3_INSTR_SAT);
384 cat3->ss = !!(instr->flags & IR3_INSTR_SS);
385 cat3->ul = !!(instr->flags & IR3_INSTR_UL);
386 cat3->dst_half = !!((src_flags ^ dst->flags) & IR3_REG_HALF);
387 cat3->opc = instr->opc;
388 cat3->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
389 cat3->sync = !!(instr->flags & IR3_INSTR_SY);
390 cat3->opc_cat = 3;
391
392 return 0;
393 }
394
395 static int emit_cat4(struct ir3_instruction *instr, void *ptr,
396 struct ir3_info *info)
397 {
398 struct ir3_register *dst = instr->regs[0];
399 struct ir3_register *src = instr->regs[1];
400 instr_cat4_t *cat4 = ptr;
401
402 iassert(instr->regs_count == 2);
403
404 if (src->flags & IR3_REG_RELATIV) {
405 iassert(src->array.offset < (1 << 10));
406 cat4->rel.src = reg(src, info, instr->repeat,
407 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_FNEG |
408 IR3_REG_FABS | IR3_REG_R | IR3_REG_HALF);
409 cat4->rel.src_c = !!(src->flags & IR3_REG_CONST);
410 cat4->rel.src_rel = 1;
411 } else if (src->flags & IR3_REG_CONST) {
412 iassert(src->num < (1 << 12));
413 cat4->c.src = reg(src, info, instr->repeat,
414 IR3_REG_CONST | IR3_REG_FNEG | IR3_REG_FABS |
415 IR3_REG_R | IR3_REG_HALF);
416 cat4->c.src_c = 1;
417 } else {
418 iassert(src->num < (1 << 11));
419 cat4->src = reg(src, info, instr->repeat,
420 IR3_REG_IMMED | IR3_REG_FNEG | IR3_REG_FABS |
421 IR3_REG_R | IR3_REG_HALF);
422 }
423
424 cat4->src_im = !!(src->flags & IR3_REG_IMMED);
425 cat4->src_neg = !!(src->flags & IR3_REG_FNEG);
426 cat4->src_abs = !!(src->flags & IR3_REG_FABS);
427 cat4->src_r = !!(src->flags & IR3_REG_R);
428
429 cat4->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
430 cat4->repeat = instr->repeat;
431 cat4->sat = !!(instr->flags & IR3_INSTR_SAT);
432 cat4->ss = !!(instr->flags & IR3_INSTR_SS);
433 cat4->ul = !!(instr->flags & IR3_INSTR_UL);
434 cat4->dst_half = !!((src->flags ^ dst->flags) & IR3_REG_HALF);
435 cat4->full = ! (src->flags & IR3_REG_HALF);
436 cat4->opc = instr->opc;
437 cat4->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
438 cat4->sync = !!(instr->flags & IR3_INSTR_SY);
439 cat4->opc_cat = 4;
440
441 return 0;
442 }
443
444 static int emit_cat5(struct ir3_instruction *instr, void *ptr,
445 struct ir3_info *info)
446 {
447 struct ir3_register *dst = instr->regs[0];
448 /* To simplify things when there could be zero, one, or two args other
449 * than tex/sampler idx, we use the first src reg in the ir to hold
450 * samp_tex hvec2:
451 */
452 struct ir3_register *src1;
453 struct ir3_register *src2;
454 instr_cat5_t *cat5 = ptr;
455
456 iassert((instr->regs_count == 1) ||
457 (instr->regs_count == 2) ||
458 (instr->regs_count == 3) ||
459 (instr->regs_count == 4));
460
461 if (instr->flags & IR3_INSTR_S2EN) {
462 src1 = instr->regs[2];
463 src2 = instr->regs_count > 3 ? instr->regs[3] : NULL;
464 } else {
465 src1 = instr->regs_count > 1 ? instr->regs[1] : NULL;
466 src2 = instr->regs_count > 2 ? instr->regs[2] : NULL;
467 }
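/* i.e. with S2EN the ir-level regs[] layout is { dst, samp_tex, src1?, src2? },
 * otherwise simply { dst, src1?, src2? }.
 */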
468
469 assume(src1 || !src2);
470
471 if (src1) {
472 cat5->full = ! (src1->flags & IR3_REG_HALF);
473 cat5->src1 = reg(src1, info, instr->repeat, IR3_REG_HALF);
474 }
475
476 if (src2) {
477 iassert(!((src1->flags ^ src2->flags) & IR3_REG_HALF));
478 cat5->src2 = reg(src2, info, instr->repeat, IR3_REG_HALF);
479 }
480
481 if (instr->flags & IR3_INSTR_B) {
482 cat5->s2en_bindless.base_hi = instr->cat5.tex_base >> 1;
483 cat5->base_lo = instr->cat5.tex_base & 1;
484 }
485
486 if (instr->flags & IR3_INSTR_S2EN) {
487 struct ir3_register *samp_tex = instr->regs[1];
488 iassert(samp_tex->flags & IR3_REG_HALF);
489 cat5->s2en_bindless.src3 = reg(samp_tex, info, instr->repeat,
490 (instr->flags & IR3_INSTR_B) ? 0 : IR3_REG_HALF);
491 if (instr->flags & IR3_INSTR_B) {
492 if (instr->flags & IR3_INSTR_A1EN) {
493 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_UNIFORM;
494 } else {
495 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_UNIFORM;
496 }
497 } else {
498 /* TODO: This should probably be CAT5_UNIFORM, at least on a6xx,
499 * as this is what the blob does and it is presumably faster, but
500 * first we should confirm it is actually nonuniform and figure
501 * out when the whole descriptor mode mechanism was introduced.
502 */
503 cat5->s2en_bindless.desc_mode = CAT5_NONUNIFORM;
504 }
505 iassert(!(instr->cat5.samp | instr->cat5.tex));
506 } else if (instr->flags & IR3_INSTR_B) {
507 cat5->s2en_bindless.src3 = instr->cat5.samp;
508 if (instr->flags & IR3_INSTR_A1EN) {
509 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_IMM;
510 } else {
511 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_IMM;
512 }
513 } else {
514 cat5->norm.samp = instr->cat5.samp;
515 cat5->norm.tex = instr->cat5.tex;
516 }
517
518 cat5->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
519 cat5->wrmask = dst->wrmask;
520 cat5->type = instr->cat5.type;
521 cat5->is_3d = !!(instr->flags & IR3_INSTR_3D);
522 cat5->is_a = !!(instr->flags & IR3_INSTR_A);
523 cat5->is_s = !!(instr->flags & IR3_INSTR_S);
524 cat5->is_s2en_bindless = !!(instr->flags & (IR3_INSTR_S2EN | IR3_INSTR_B));
525 cat5->is_o = !!(instr->flags & IR3_INSTR_O);
526 cat5->is_p = !!(instr->flags & IR3_INSTR_P);
527 cat5->opc = instr->opc;
528 cat5->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
529 cat5->sync = !!(instr->flags & IR3_INSTR_SY);
530 cat5->opc_cat = 5;
531
532 return 0;
533 }
534
535 static int emit_cat6_a6xx(struct ir3_instruction *instr, void *ptr,
536 struct ir3_info *info)
537 {
538 struct ir3_register *src1, *src2, *ssbo;
539 instr_cat6_a6xx_t *cat6 = ptr;
540 bool has_dest = (instr->opc == OPC_LDIB || instr->opc == OPC_LDC);
541
542 ssbo = instr->regs[1];
543 src1 = instr->regs[2];
544
545 if (has_dest) {
546 /* the src2 field in the instruction is actually the destination
547 * register for load instructions:
548 */
549 src2 = instr->regs[0];
550 } else {
551 src2 = instr->regs[3];
552 }
553
554 cat6->type = instr->cat6.type;
555 cat6->d = instr->cat6.d - (instr->opc == OPC_LDC ? 0 : 1);
556 cat6->typed = instr->cat6.typed;
557 cat6->type_size = instr->cat6.iim_val - 1;
558 cat6->opc = instr->opc;
559 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
560 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
561 cat6->opc_cat = 6;
562
563 cat6->src1 = reg(src1, info, instr->repeat, 0);
564 cat6->src2 = reg(src2, info, instr->repeat, 0);
565 cat6->ssbo = reg(ssbo, info, instr->repeat, IR3_REG_IMMED);
566
567 if (instr->flags & IR3_INSTR_B) {
568 if (ssbo->flags & IR3_REG_IMMED) {
569 cat6->desc_mode = CAT6_BINDLESS_IMM;
570 } else {
571 cat6->desc_mode = CAT6_BINDLESS_UNIFORM;
572 }
573 cat6->base = instr->cat6.base;
574 } else {
575 if (ssbo->flags & IR3_REG_IMMED)
576 cat6->desc_mode = CAT6_IMM;
577 else
578 cat6->desc_mode = CAT6_UNIFORM;
579 }
580
581 switch (instr->opc) {
582 case OPC_ATOMIC_ADD:
583 case OPC_ATOMIC_SUB:
584 case OPC_ATOMIC_XCHG:
585 case OPC_ATOMIC_INC:
586 case OPC_ATOMIC_DEC:
587 case OPC_ATOMIC_CMPXCHG:
588 case OPC_ATOMIC_MIN:
589 case OPC_ATOMIC_MAX:
590 case OPC_ATOMIC_AND:
591 case OPC_ATOMIC_OR:
592 case OPC_ATOMIC_XOR:
593 cat6->pad1 = 0x1;
594 cat6->pad3 = 0xc;
595 cat6->pad5 = 0x3;
596 break;
597 case OPC_STIB:
598 cat6->pad1 = 0x0;
599 cat6->pad3 = 0xc;
600 cat6->pad5 = 0x2;
601 break;
602 case OPC_LDIB:
603 cat6->pad1 = 0x1;
604 cat6->pad3 = 0xc;
605 cat6->pad5 = 0x2;
606 break;
607 case OPC_LDC:
608 cat6->pad1 = 0x0;
609 cat6->pad3 = 0x8;
610 cat6->pad5 = 0x2;
611 break;
612 default:
613 iassert(0);
614 }
615 cat6->pad2 = 0x0;
616 cat6->pad4 = 0x0;
617
618 return 0;
619 }
620
621 static int emit_cat6(struct ir3_instruction *instr, void *ptr,
622 struct ir3_info *info)
623 {
624 struct ir3_register *dst, *src1, *src2;
625 instr_cat6_t *cat6 = ptr;
626
627 /* In a6xx we start using a new instruction encoding for some of
628 * these instructions:
629 */
630 if (info->gpu_id >= 600) {
631 switch (instr->opc) {
632 case OPC_ATOMIC_ADD:
633 case OPC_ATOMIC_SUB:
634 case OPC_ATOMIC_XCHG:
635 case OPC_ATOMIC_INC:
636 case OPC_ATOMIC_DEC:
637 case OPC_ATOMIC_CMPXCHG:
638 case OPC_ATOMIC_MIN:
639 case OPC_ATOMIC_MAX:
640 case OPC_ATOMIC_AND:
641 case OPC_ATOMIC_OR:
642 case OPC_ATOMIC_XOR:
643 /* The shared variants of these still use the old encoding: */
644 if (!(instr->flags & IR3_INSTR_G))
645 break;
646 /* fallthrough */
647 case OPC_STIB:
648 case OPC_LDIB:
649 case OPC_LDC:
650 return emit_cat6_a6xx(instr, ptr, info);
651 default:
652 break;
653 }
654 }
655
656 bool type_full = type_size(instr->cat6.type) == 32;
657
658 cat6->type = instr->cat6.type;
659 cat6->opc = instr->opc;
660 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
661 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
662 cat6->g = !!(instr->flags & IR3_INSTR_G);
663 cat6->opc_cat = 6;
664
665 switch (instr->opc) {
666 case OPC_RESINFO:
667 case OPC_RESFMT:
668 iassert_type(instr->regs[0], type_full); /* dst */
669 iassert_type(instr->regs[1], type_full); /* src1 */
670 break;
671 case OPC_L2G:
672 case OPC_G2L:
673 iassert_type(instr->regs[0], true); /* dst */
674 iassert_type(instr->regs[1], true); /* src1 */
675 break;
676 case OPC_STG:
677 case OPC_STL:
678 case OPC_STP:
679 case OPC_STLW:
680 case OPC_STIB:
681 /* no dst, so regs[0] is dummy */
682 iassert_type(instr->regs[1], true); /* dst */
683 iassert_type(instr->regs[2], type_full); /* src1 */
684 iassert_type(instr->regs[3], true); /* src2 */
685 break;
686 default:
687 iassert_type(instr->regs[0], type_full); /* dst */
688 iassert_type(instr->regs[1], true); /* src1 */
689 if (instr->regs_count > 2)
 690 		iassert_type(instr->regs[2], true);     /* src2 */
691 break;
692 }
693
694 /* the "dst" for a store instruction is (from the perspective
695 * of data flow in the shader, ie. register use/def, etc) in
696 * fact a register that is read by the instruction, rather
697 * than written:
698 */
699 if (is_store(instr)) {
700 iassert(instr->regs_count >= 3);
701
702 dst = instr->regs[1];
703 src1 = instr->regs[2];
704 src2 = (instr->regs_count >= 4) ? instr->regs[3] : NULL;
705 } else {
706 iassert(instr->regs_count >= 2);
707
708 dst = instr->regs[0];
709 src1 = instr->regs[1];
710 src2 = (instr->regs_count >= 3) ? instr->regs[2] : NULL;
711 }
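/* i.e. for stores the encoder's dst/src1/src2 come from regs[1..3] (with
 * regs[0] being the dummy noted above), while for loads they come from
 * regs[0..2] as usual.
 */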
712
713 /* TODO we need a more comprehensive list about which instructions
714 * can be encoded which way. Or possibly use IR3_INSTR_0 flag to
715 * indicate to use the src_off encoding even if offset is zero
716 * (but then what to do about dst_off?)
717 */
718 if (is_atomic(instr->opc)) {
719 instr_cat6ldgb_t *ldgb = ptr;
720
721 /* maybe these two bits both determine the instruction encoding? */
722 cat6->src_off = false;
723
724 ldgb->d = instr->cat6.d - 1;
725 ldgb->typed = instr->cat6.typed;
726 ldgb->type_size = instr->cat6.iim_val - 1;
727
728 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
729
730 if (ldgb->g) {
731 struct ir3_register *src3 = instr->regs[3];
732 struct ir3_register *src4 = instr->regs[4];
733
734 /* first src is src_ssbo: */
735 iassert(src1->flags & IR3_REG_IMMED);
736 ldgb->src_ssbo = src1->uim_val;
737
738 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
739 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
740 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
741 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
742
743 ldgb->src3 = reg(src4, info, instr->repeat, 0);
744 ldgb->pad0 = 0x1;
745 ldgb->pad3 = 0x1;
746 } else {
747 ldgb->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
748 ldgb->src1_im = !!(src1->flags & IR3_REG_IMMED);
749 ldgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
750 ldgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
751 ldgb->pad0 = 0x1;
752 ldgb->pad3 = 0x0;
753 }
754
755 return 0;
756 } else if (instr->opc == OPC_LDGB) {
757 struct ir3_register *src3 = instr->regs[3];
758 instr_cat6ldgb_t *ldgb = ptr;
759
760 /* maybe these two bits both determine the instruction encoding? */
761 cat6->src_off = false;
762
763 ldgb->d = instr->cat6.d - 1;
764 ldgb->typed = instr->cat6.typed;
765 ldgb->type_size = instr->cat6.iim_val - 1;
766
767 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
768
769 /* first src is src_ssbo: */
770 iassert(src1->flags & IR3_REG_IMMED);
771 ldgb->src_ssbo = src1->uim_val;
772
773 /* then next two are src1/src2: */
774 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
775 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
776 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
777 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
778
779 ldgb->pad0 = 0x0;
780 ldgb->pad3 = 0x1;
781
782 return 0;
783 } else if (instr->opc == OPC_RESINFO) {
784 instr_cat6ldgb_t *ldgb = ptr;
785
786 ldgb->d = instr->cat6.d - 1;
787
788 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
789
790 /* first src is src_ssbo: */
791 iassert(src1->flags & IR3_REG_IMMED);
792 ldgb->src_ssbo = src1->uim_val;
793
794 return 0;
795 } else if ((instr->opc == OPC_STGB) || (instr->opc == OPC_STIB)) {
796 struct ir3_register *src3 = instr->regs[4];
797 instr_cat6stgb_t *stgb = ptr;
798
799 /* maybe these two bits both determine the instruction encoding? */
800 cat6->src_off = true;
801 stgb->pad3 = 0x2;
802
803 stgb->d = instr->cat6.d - 1;
804 stgb->typed = instr->cat6.typed;
805 stgb->type_size = instr->cat6.iim_val - 1;
806
807 /* first src is dst_ssbo: */
808 iassert(dst->flags & IR3_REG_IMMED);
809 stgb->dst_ssbo = dst->uim_val;
810
811 /* then src1/src2/src3: */
812 stgb->src1 = reg(src1, info, instr->repeat, 0);
813 stgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
814 stgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
815 stgb->src3 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
816 stgb->src3_im = !!(src3->flags & IR3_REG_IMMED);
817
818 return 0;
819 } else if (instr->cat6.src_offset || (instr->opc == OPC_LDG) ||
820 (instr->opc == OPC_LDL) || (instr->opc == OPC_LDLW)) {
821 struct ir3_register *src3 = instr->regs[3];
822 instr_cat6a_t *cat6a = ptr;
823
824 cat6->src_off = true;
825
826 if (instr->opc == OPC_LDG) {
827 /* For LDG src1 can not be immediate, so src1_imm is redundant and
828 * instead used to signal whether (when true) 'off' is a 32 bit
829 * register or an immediate offset.
830 */
831 cat6a->src1 = reg(src1, info, instr->repeat, 0);
832 cat6a->src1_im = !(src3->flags & IR3_REG_IMMED);
833 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
834 } else {
835 cat6a->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
836 cat6a->src1_im = !!(src1->flags & IR3_REG_IMMED);
837 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
838 iassert(src3->flags & IR3_REG_IMMED);
839 }
840
841 /* Num components */
842 cat6a->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
843 cat6a->src2_im = true;
844 } else {
845 instr_cat6b_t *cat6b = ptr;
846
847 cat6->src_off = false;
848
849 cat6b->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED | IR3_REG_HALF);
850 cat6b->src1_im = !!(src1->flags & IR3_REG_IMMED);
851 if (src2) {
852 cat6b->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
853 cat6b->src2_im = !!(src2->flags & IR3_REG_IMMED);
854 }
855 }
856
857 if (instr->cat6.dst_offset || (instr->opc == OPC_STG) ||
858 (instr->opc == OPC_STL) || (instr->opc == OPC_STLW)) {
859 instr_cat6c_t *cat6c = ptr;
860 cat6->dst_off = true;
861 cat6c->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
862
863 if (instr->flags & IR3_INSTR_G) {
864 struct ir3_register *src3 = instr->regs[4];
865 cat6c->off = reg(src3, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
866 if (src3->flags & IR3_REG_IMMED) {
867 /* Immediate offsets are in bytes... */
868 cat6->g = false;
869 cat6c->off *= 4;
870 }
871 } else {
872 cat6c->off = instr->cat6.dst_offset;
873 }
874 } else {
875 instr_cat6d_t *cat6d = ptr;
876 cat6->dst_off = false;
877 cat6d->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
878 }
879
880 return 0;
881 }
882
883 static int emit_cat7(struct ir3_instruction *instr, void *ptr,
884 struct ir3_info *info)
885 {
886 instr_cat7_t *cat7 = ptr;
887
888 cat7->ss = !!(instr->flags & IR3_INSTR_SS);
889 cat7->w = instr->cat7.w;
890 cat7->r = instr->cat7.r;
891 cat7->l = instr->cat7.l;
892 cat7->g = instr->cat7.g;
893 cat7->opc = instr->opc;
894 cat7->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
895 cat7->sync = !!(instr->flags & IR3_INSTR_SY);
896 cat7->opc_cat = 7;
897
898 return 0;
899 }
900
901 static int (*emit[])(struct ir3_instruction *instr, void *ptr,
902 struct ir3_info *info) = {
903 emit_cat0, emit_cat1, emit_cat2, emit_cat3, emit_cat4, emit_cat5, emit_cat6,
904 emit_cat7,
905 };
906
907 void * ir3_assemble(struct ir3 *shader, struct ir3_info *info,
908 uint32_t gpu_id)
909 {
910 uint32_t *ptr, *dwords;
911
912 memset(info, 0, sizeof(*info));
913 info->gpu_id = gpu_id;
914 info->max_reg = -1;
915 info->max_half_reg = -1;
916 info->max_const = -1;
917
918 foreach_block (block, &shader->block_list) {
919 foreach_instr (instr, &block->instr_list) {
920 info->sizedwords += 2;
921 }
922 }
923
924 /* need an integer number of instruction "groups" (sets of 16
925 * instructions on a4xx or sets of 4 instructions on a3xx),
926 * so pad out w/ NOPs if needed: (NOTE each instruction is 64bits)
927 */
928 if (gpu_id >= 400) {
929 info->sizedwords = align(info->sizedwords, 16 * 2);
930 } else {
931 info->sizedwords = align(info->sizedwords, 4 * 2);
932 }
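/* For example: a shader with 10 instructions starts at sizedwords = 20;
 * on a4xx+ that is padded to align(20, 32) = 32 dwords (one group of 16
 * instructions), on a3xx to align(20, 8) = 24 dwords (three groups of 4).
 */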
933
934 ptr = dwords = calloc(4, info->sizedwords);
935
936 foreach_block (block, &shader->block_list) {
937 unsigned sfu_delay = 0;
938
939 foreach_instr (instr, &block->instr_list) {
940 int ret = emit[opc_cat(instr->opc)](instr, dwords, info);
941 if (ret)
942 goto fail;
943
944 if ((instr->opc == OPC_BARY_F) && (instr->regs[0]->flags & IR3_REG_EI))
945 info->last_baryf = info->instrs_count;
946
947 info->instrs_count += 1 + instr->repeat + instr->nop;
948 info->nops_count += instr->nop;
949 if (instr->opc == OPC_NOP)
950 info->nops_count += 1 + instr->repeat;
951 if (instr->opc == OPC_MOV) {
952 if (instr->cat1.src_type == instr->cat1.dst_type) {
953 info->mov_count += 1 + instr->repeat;
954 } else {
955 info->cov_count += 1 + instr->repeat;
956 }
957 }
958 dwords += 2;
959
960 if (instr->flags & IR3_INSTR_SS) {
961 info->ss++;
962 info->sstall += sfu_delay;
963 }
964
965 if (instr->flags & IR3_INSTR_SY)
966 info->sy++;
967
968 if (is_sfu(instr)) {
969 sfu_delay = 10;
970 } else if (sfu_delay > 0) {
971 sfu_delay--;
972 }
973 }
974 }
975
976 return ptr;
977
978 fail:
979 free(ptr);
980 return NULL;
981 }
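/* A minimal usage sketch (the gpu_id value is just an example):
 *
 *    struct ir3_info info;
 *    uint32_t *bin = ir3_assemble(ir, &info, 630);
 *    if (!bin)
 *       return;        // some instruction failed to encode
 *    // ... upload info.sizedwords dwords of 'bin' to the GPU ...
 *    free(bin);        // buffer comes from calloc(), the caller owns it
 */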
982
983 static struct ir3_register * reg_create(struct ir3 *shader,
984 int num, int flags)
985 {
986 struct ir3_register *reg =
987 ir3_alloc(shader, sizeof(struct ir3_register));
988 reg->wrmask = 1;
989 reg->flags = flags;
990 reg->num = num;
991 if (shader->compiler->gpu_id >= 600)
992 reg->merged = true;
993 return reg;
994 }
995
996 static void insert_instr(struct ir3_block *block,
997 struct ir3_instruction *instr)
998 {
999 struct ir3 *shader = block->shader;
1000 #ifdef DEBUG
1001 instr->serialno = ++shader->instr_count;
1002 #endif
1003 list_addtail(&instr->node, &block->instr_list);
1004
1005 if (is_input(instr))
1006 array_insert(shader, shader->baryfs, instr);
1007 }
1008
1009 struct ir3_block * ir3_block_create(struct ir3 *shader)
1010 {
1011 struct ir3_block *block = ir3_alloc(shader, sizeof(*block));
1012 #ifdef DEBUG
1013 block->serialno = ++shader->block_count;
1014 #endif
1015 block->shader = shader;
1016 list_inithead(&block->node);
1017 list_inithead(&block->instr_list);
1018 block->predecessors = _mesa_pointer_set_create(block);
1019 return block;
1020 }
1021
1022 static struct ir3_instruction *instr_create(struct ir3_block *block, int nreg)
1023 {
1024 struct ir3_instruction *instr;
1025 unsigned sz = sizeof(*instr) + (nreg * sizeof(instr->regs[0]));
1026 char *ptr = ir3_alloc(block->shader, sz);
1027
1028 instr = (struct ir3_instruction *)ptr;
1029 ptr += sizeof(*instr);
1030 instr->regs = (struct ir3_register **)ptr;
1031
1032 #ifdef DEBUG
1033 instr->regs_max = nreg;
1034 #endif
1035
1036 return instr;
1037 }
1038
1039 struct ir3_instruction * ir3_instr_create2(struct ir3_block *block,
1040 opc_t opc, int nreg)
1041 {
1042 struct ir3_instruction *instr = instr_create(block, nreg);
1043 instr->block = block;
1044 instr->opc = opc;
1045 insert_instr(block, instr);
1046 return instr;
1047 }
1048
1049 struct ir3_instruction * ir3_instr_create(struct ir3_block *block, opc_t opc)
1050 {
1051 /* NOTE: we could be slightly more clever, at least for non-meta,
1052 * and choose # of regs based on category.
1053 */
1054 return ir3_instr_create2(block, opc, 4);
1055 }
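/* A hedged sketch of building an instruction by hand (the compiler
 * normally goes through the builder helpers in ir3.h; OPC_ADD_F and the
 * register numbers here are purely illustrative):
 *
 *    struct ir3_instruction *add = ir3_instr_create(block, OPC_ADD_F);
 *    ir3_reg_create(add, regid(0, 0), 0);   // regs[0]: dst  = r0.x
 *    ir3_reg_create(add, regid(1, 1), 0);   // regs[1]: src1 = r1.y
 *    ir3_reg_create(add, regid(2, 2), 0);   // regs[2]: src2 = r2.z
 */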
1056
1057 struct ir3_instruction * ir3_instr_clone(struct ir3_instruction *instr)
1058 {
1059 struct ir3_instruction *new_instr = instr_create(instr->block,
1060 instr->regs_count);
1061 struct ir3_register **regs;
1062 unsigned i;
1063
1064 regs = new_instr->regs;
1065 *new_instr = *instr;
1066 new_instr->regs = regs;
1067
1068 insert_instr(instr->block, new_instr);
1069
1070 /* clone registers: */
1071 new_instr->regs_count = 0;
1072 for (i = 0; i < instr->regs_count; i++) {
1073 struct ir3_register *reg = instr->regs[i];
1074 struct ir3_register *new_reg =
1075 ir3_reg_create(new_instr, reg->num, reg->flags);
1076 *new_reg = *reg;
1077 }
1078
1079 return new_instr;
1080 }
1081
 1082 /* Add a false dependency to an instruction, to ensure that the dependency is scheduled before it: */
1083 void ir3_instr_add_dep(struct ir3_instruction *instr, struct ir3_instruction *dep)
1084 {
1085 array_insert(instr, instr->deps, dep);
1086 }
1087
1088 struct ir3_register * ir3_reg_create(struct ir3_instruction *instr,
1089 int num, int flags)
1090 {
1091 struct ir3 *shader = instr->block->shader;
1092 struct ir3_register *reg = reg_create(shader, num, flags);
1093 #ifdef DEBUG
1094 debug_assert(instr->regs_count < instr->regs_max);
1095 #endif
1096 instr->regs[instr->regs_count++] = reg;
1097 return reg;
1098 }
1099
1100 struct ir3_register * ir3_reg_clone(struct ir3 *shader,
1101 struct ir3_register *reg)
1102 {
1103 struct ir3_register *new_reg = reg_create(shader, 0, 0);
1104 *new_reg = *reg;
1105 return new_reg;
1106 }
1107
1108 void
1109 ir3_instr_set_address(struct ir3_instruction *instr,
1110 struct ir3_instruction *addr)
1111 {
1112 if (instr->address != addr) {
1113 struct ir3 *ir = instr->block->shader;
1114
1115 debug_assert(!instr->address);
1116 debug_assert(instr->block == addr->block);
1117
1118 instr->address = addr;
1119 debug_assert(reg_num(addr->regs[0]) == REG_A0);
1120 unsigned comp = reg_comp(addr->regs[0]);
1121 if (comp == 0) {
1122 array_insert(ir, ir->a0_users, instr);
1123 } else {
1124 debug_assert(comp == 1);
1125 array_insert(ir, ir->a1_users, instr);
1126 }
1127 }
1128 }
1129
1130 void
1131 ir3_block_clear_mark(struct ir3_block *block)
1132 {
1133 foreach_instr (instr, &block->instr_list)
1134 instr->flags &= ~IR3_INSTR_MARK;
1135 }
1136
1137 void
1138 ir3_clear_mark(struct ir3 *ir)
1139 {
1140 foreach_block (block, &ir->block_list) {
1141 ir3_block_clear_mark(block);
1142 }
1143 }
1144
1145 unsigned
1146 ir3_count_instructions(struct ir3 *ir)
1147 {
1148 unsigned cnt = 1;
1149 foreach_block (block, &ir->block_list) {
1150 block->start_ip = cnt;
1151 foreach_instr (instr, &block->instr_list) {
1152 instr->ip = cnt++;
1153 }
1154 block->end_ip = cnt;
1155 }
1156 return cnt;
1157 }
1158
1159 /* When counting instructions for RA, we insert extra fake instructions at the
1160 * beginning of each block, where values become live, and at the end where
1161 * values die. This prevents problems where values live-in at the beginning or
1162 * live-out at the end of a block from being treated as if they were
1163 * live-in/live-out at the first/last instruction, which would be incorrect.
1164 * In ir3_legalize these ip's are assumed to be actual ip's of the final
1165 * program, so it would be incorrect to use this everywhere.
1166 */
1167
1168 unsigned
1169 ir3_count_instructions_ra(struct ir3 *ir)
1170 {
1171 unsigned cnt = 1;
1172 foreach_block (block, &ir->block_list) {
1173 block->start_ip = cnt++;
1174 foreach_instr (instr, &block->instr_list) {
1175 instr->ip = cnt++;
1176 }
1177 block->end_ip = cnt++;
1178 }
1179 return cnt;
1180 }
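/* For example, a single block with three instructions gets:
 *
 *    ir3_count_instructions():    start_ip = 1, instr ips 1, 2, 3, end_ip = 4
 *    ir3_count_instructions_ra(): start_ip = 1, instr ips 2, 3, 4, end_ip = 5
 *
 * i.e. the _ra variant reserves distinct ip's for the fake begin/end
 * instructions described above.
 */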
1181
1182 struct ir3_array *
1183 ir3_lookup_array(struct ir3 *ir, unsigned id)
1184 {
1185 foreach_array (arr, &ir->array_list)
1186 if (arr->id == id)
1187 return arr;
1188 return NULL;
1189 }
1190
1191 void
1192 ir3_find_ssa_uses(struct ir3 *ir, void *mem_ctx, bool falsedeps)
1193 {
1194 /* We could do this in a single pass if we can assume instructions
1195 * are always sorted. Which currently might not always be true.
1196 * (In particular after ir3_group pass, but maybe other places.)
1197 */
1198 foreach_block (block, &ir->block_list)
1199 foreach_instr (instr, &block->instr_list)
1200 instr->uses = NULL;
1201
1202 foreach_block (block, &ir->block_list) {
1203 foreach_instr (instr, &block->instr_list) {
1204 foreach_ssa_src_n (src, n, instr) {
1205 if (__is_false_dep(instr, n) && !falsedeps)
1206 continue;
1207 if (!src->uses)
1208 src->uses = _mesa_pointer_set_create(mem_ctx);
1209 _mesa_set_add(src->uses, instr);
1210 }
1211 }
1212 }
1213 }
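/* A sketch of how a pass might consume the result (assuming util/set.h's
 * set_foreach; note that 'uses' stays NULL for instructions with no SSA
 * users, so check before iterating):
 *
 *    ir3_find_ssa_uses(ir, mem_ctx, false);
 *    if (instr->uses) {
 *       set_foreach (instr->uses, entry) {
 *          struct ir3_instruction *use = (void *)entry->key;
 *          // ... process 'use' ...
 *       }
 *    }
 */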