1 /*
2 * Copyright (c) 2012 Rob Clark <robdclark@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "ir3.h"
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <string.h>
29 #include <assert.h>
30 #include <stdbool.h>
31 #include <errno.h>
32
33 #include "util/bitscan.h"
34 #include "util/ralloc.h"
35 #include "util/u_math.h"
36
37 #include "instr-a3xx.h"
38 #include "ir3_shader.h"
39
40 /* simple allocator to carve allocations out of an up-front allocated heap,
41 * so that we can free everything easily in one shot.  (For now this just
42 * defers to ralloc, with the ir3 as the memory context.) */
43 void * ir3_alloc(struct ir3 *shader, int sz)
44 {
45 return rzalloc_size(shader, sz); /* TODO: don't use rzalloc */
46 }
47
48 struct ir3 * ir3_create(struct ir3_compiler *compiler,
49 struct ir3_shader_variant *v)
50 {
51 struct ir3 *shader = rzalloc(v, struct ir3);
52
53 shader->compiler = compiler;
54 shader->type = v->type;
55
56 list_inithead(&shader->block_list);
57 list_inithead(&shader->array_list);
58
59 return shader;
60 }
61
62 void ir3_destroy(struct ir3 *shader)
63 {
64 ralloc_free(shader);
65 }
66
67 #define iassert(cond) do { \
68 if (!(cond)) { \
69 debug_assert(cond); \
70 return -1; \
71 } } while (0)
72
73 #define iassert_type(reg, full) do { \
74 if ((full)) { \
75 iassert(!((reg)->flags & IR3_REG_HALF)); \
76 } else { \
77 iassert((reg)->flags & IR3_REG_HALF); \
78 } } while (0)
79
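/* encode a register reference into its binary form, and update the
 * max_reg/max_half_reg/max_const footprint tracked in ir3_info.  GPR
 * numbers are packed as rN.c, ie. component in the low two bits:
 */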
80 static uint32_t reg(struct ir3_register *reg, struct ir3_info *info,
81 uint32_t repeat, uint32_t valid_flags)
82 {
83 struct ir3_shader_variant *v = info->data;
84 reg_t val = { .dummy32 = 0 };
85
86 if (reg->flags & ~valid_flags) {
87 debug_printf("INVALID FLAGS: %x vs %x\n",
88 reg->flags, valid_flags);
89 }
90
91 if (!(reg->flags & IR3_REG_R))
92 repeat = 0;
93
94 if (reg->flags & IR3_REG_IMMED) {
95 val.iim_val = reg->iim_val;
96 } else {
97 unsigned components;
98 int16_t max;
99
100 if (reg->flags & IR3_REG_RELATIV) {
101 components = reg->size;
102 val.idummy10 = reg->array.offset;
103 max = (reg->array.offset + repeat + components - 1);
104 } else {
105 components = util_last_bit(reg->wrmask);
106 val.comp = reg->num & 0x3;
107 val.num = reg->num >> 2;
108 max = (reg->num + repeat + components - 1);
109 }
110
111 if (reg->flags & IR3_REG_CONST) {
112 info->max_const = MAX2(info->max_const, max >> 2);
113 } else if (val.num == 63) {
114 /* ignore writes to dummy register r63.x */
115 } else if (max < regid(48, 0)) {
116 if (reg->flags & IR3_REG_HALF) {
117 if (v->mergedregs) {
118 /* starting w/ a6xx, half regs conflict with full regs: */
119 info->max_reg = MAX2(info->max_reg, max >> 3);
120 } else {
121 info->max_half_reg = MAX2(info->max_half_reg, max >> 2);
122 }
123 } else {
124 info->max_reg = MAX2(info->max_reg, max >> 2);
125 }
126 }
127 }
128
129 return val.dummy32;
130 }
131
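/* encode a category 0 (flow control: nop/branch/jump/kill/etc) instruction: */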
132 static int emit_cat0(struct ir3_instruction *instr, void *ptr,
133 struct ir3_info *info)
134 {
135 struct ir3_shader_variant *v = info->data;
136 instr_cat0_t *cat0 = ptr;
137
138 if (v->shader->compiler->gpu_id >= 500) {
139 cat0->a5xx.immed = instr->cat0.immed;
140 } else if (v->shader->compiler->gpu_id >= 400) {
141 cat0->a4xx.immed = instr->cat0.immed;
142 } else {
143 cat0->a3xx.immed = instr->cat0.immed;
144 }
145 cat0->repeat = instr->repeat;
146 cat0->ss = !!(instr->flags & IR3_INSTR_SS);
147 cat0->inv0 = instr->cat0.inv;
148 cat0->comp0 = instr->cat0.comp;
149 cat0->opc = instr->opc;
150 cat0->opc_hi = instr->opc >= 16;
151 cat0->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
152 cat0->sync = !!(instr->flags & IR3_INSTR_SY);
153 cat0->opc_cat = 0;
154
155 return 0;
156 }
157
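/* encode a category 1 (mov/cov, ie. copy or type-converting move) instruction: */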
158 static int emit_cat1(struct ir3_instruction *instr, void *ptr,
159 struct ir3_info *info)
160 {
161 struct ir3_register *dst = instr->regs[0];
162 struct ir3_register *src = instr->regs[1];
163 instr_cat1_t *cat1 = ptr;
164
165 iassert(instr->regs_count == 2);
166 iassert_type(dst, type_size(instr->cat1.dst_type) == 32);
167 if (!(src->flags & IR3_REG_IMMED))
168 iassert_type(src, type_size(instr->cat1.src_type) == 32);
169
170 if (src->flags & IR3_REG_IMMED) {
171 cat1->iim_val = src->iim_val;
172 cat1->src_im = 1;
173 } else if (src->flags & IR3_REG_RELATIV) {
174 cat1->off = reg(src, info, instr->repeat,
175 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF | IR3_REG_RELATIV);
176 cat1->src_rel = 1;
177 cat1->src_rel_c = !!(src->flags & IR3_REG_CONST);
178 } else {
179 cat1->src = reg(src, info, instr->repeat,
180 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF);
181 cat1->src_c = !!(src->flags & IR3_REG_CONST);
182 }
183
184 cat1->dst = reg(dst, info, instr->repeat,
185 IR3_REG_RELATIV | IR3_REG_EVEN |
186 IR3_REG_R | IR3_REG_POS_INF | IR3_REG_HALF);
187 cat1->repeat = instr->repeat;
188 cat1->src_r = !!(src->flags & IR3_REG_R);
189 cat1->ss = !!(instr->flags & IR3_INSTR_SS);
190 cat1->ul = !!(instr->flags & IR3_INSTR_UL);
191 cat1->dst_type = instr->cat1.dst_type;
192 cat1->dst_rel = !!(dst->flags & IR3_REG_RELATIV);
193 cat1->src_type = instr->cat1.src_type;
194 cat1->even = !!(dst->flags & IR3_REG_EVEN);
195 cat1->pos_inf = !!(dst->flags & IR3_REG_POS_INF);
196 cat1->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
197 cat1->sync = !!(instr->flags & IR3_INSTR_SY);
198 cat1->opc_cat = 1;
199
200 return 0;
201 }
202
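/* encode a category 2 (normal ALU, one or two src) instruction: */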
203 static int emit_cat2(struct ir3_instruction *instr, void *ptr,
204 struct ir3_info *info)
205 {
206 struct ir3_register *dst = instr->regs[0];
207 struct ir3_register *src1 = instr->regs[1];
208 struct ir3_register *src2 = instr->regs[2];
209 instr_cat2_t *cat2 = ptr;
210 unsigned absneg = ir3_cat2_absneg(instr->opc);
211
212 iassert((instr->regs_count == 2) || (instr->regs_count == 3));
213
214 if (instr->nop) {
215 iassert(!instr->repeat);
216 iassert(instr->nop <= 3);
217
218 cat2->src1_r = instr->nop & 0x1;
219 cat2->src2_r = (instr->nop >> 1) & 0x1;
220 } else {
221 cat2->src1_r = !!(src1->flags & IR3_REG_R);
222 if (src2)
223 cat2->src2_r = !!(src2->flags & IR3_REG_R);
224 }
225
226 if (src1->flags & IR3_REG_RELATIV) {
227 iassert(src1->array.offset < (1 << 10));
228 cat2->rel1.src1 = reg(src1, info, instr->repeat,
229 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
230 IR3_REG_HALF | absneg);
231 cat2->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
232 cat2->rel1.src1_rel = 1;
233 } else if (src1->flags & IR3_REG_CONST) {
234 iassert(src1->num < (1 << 12));
235 cat2->c1.src1 = reg(src1, info, instr->repeat,
236 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
237 absneg);
238 cat2->c1.src1_c = 1;
239 } else {
240 iassert(src1->num < (1 << 11));
241 cat2->src1 = reg(src1, info, instr->repeat,
242 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
243 absneg);
244 }
245 cat2->src1_im = !!(src1->flags & IR3_REG_IMMED);
246 cat2->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
247 cat2->src1_abs = !!(src1->flags & (IR3_REG_FABS | IR3_REG_SABS));
248
249 if (src2) {
250 iassert((src2->flags & IR3_REG_IMMED) ||
251 !((src1->flags ^ src2->flags) & IR3_REG_HALF));
252
253 if (src2->flags & IR3_REG_RELATIV) {
254 iassert(src2->array.offset < (1 << 10));
255 cat2->rel2.src2 = reg(src2, info, instr->repeat,
256 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
257 IR3_REG_HALF | absneg);
258 cat2->rel2.src2_c = !!(src2->flags & IR3_REG_CONST);
259 cat2->rel2.src2_rel = 1;
260 } else if (src2->flags & IR3_REG_CONST) {
261 iassert(src2->num < (1 << 12));
262 cat2->c2.src2 = reg(src2, info, instr->repeat,
263 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
264 absneg);
265 cat2->c2.src2_c = 1;
266 } else {
267 iassert(src2->num < (1 << 11));
268 cat2->src2 = reg(src2, info, instr->repeat,
269 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
270 absneg);
271 }
272
273 cat2->src2_im = !!(src2->flags & IR3_REG_IMMED);
274 cat2->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
275 cat2->src2_abs = !!(src2->flags & (IR3_REG_FABS | IR3_REG_SABS));
276 }
277
278 cat2->dst = reg(dst, info, instr->repeat,
279 IR3_REG_R | IR3_REG_EI | IR3_REG_HALF);
280 cat2->repeat = instr->repeat;
281 cat2->sat = !!(instr->flags & IR3_INSTR_SAT);
282 cat2->ss = !!(instr->flags & IR3_INSTR_SS);
283 cat2->ul = !!(instr->flags & IR3_INSTR_UL);
284 cat2->dst_half = !!((src1->flags ^ dst->flags) & IR3_REG_HALF);
285 cat2->ei = !!(dst->flags & IR3_REG_EI);
286 cat2->cond = instr->cat2.condition;
287 cat2->full = ! (src1->flags & IR3_REG_HALF);
288 cat2->opc = instr->opc;
289 cat2->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
290 cat2->sync = !!(instr->flags & IR3_INSTR_SY);
291 cat2->opc_cat = 2;
292
293 return 0;
294 }
295
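/* encode a category 3 (three src ALU, ie. mad/sel/sad variants) instruction: */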
296 static int emit_cat3(struct ir3_instruction *instr, void *ptr,
297 struct ir3_info *info)
298 {
299 struct ir3_register *dst = instr->regs[0];
300 struct ir3_register *src1 = instr->regs[1];
301 struct ir3_register *src2 = instr->regs[2];
302 struct ir3_register *src3 = instr->regs[3];
303 unsigned absneg = ir3_cat3_absneg(instr->opc);
304 instr_cat3_t *cat3 = ptr;
305 uint32_t src_flags = 0;
306
307 switch (instr->opc) {
308 case OPC_MAD_F16:
309 case OPC_MAD_U16:
310 case OPC_MAD_S16:
311 case OPC_SEL_B16:
312 case OPC_SEL_S16:
313 case OPC_SEL_F16:
314 case OPC_SAD_S16:
315 case OPC_SAD_S32: // really??
316 src_flags |= IR3_REG_HALF;
317 break;
318 default:
319 break;
320 }
321
322 iassert(instr->regs_count == 4);
323 iassert(!((src1->flags ^ src_flags) & IR3_REG_HALF));
324 iassert(!((src2->flags ^ src_flags) & IR3_REG_HALF));
325 iassert(!((src3->flags ^ src_flags) & IR3_REG_HALF));
326
327 if (instr->nop) {
328 iassert(!instr->repeat);
329 iassert(instr->nop <= 3);
330
331 cat3->src1_r = instr->nop & 0x1;
332 cat3->src2_r = (instr->nop >> 1) & 0x1;
333 } else {
334 cat3->src1_r = !!(src1->flags & IR3_REG_R);
335 cat3->src2_r = !!(src2->flags & IR3_REG_R);
336 }
337
338 if (src1->flags & IR3_REG_RELATIV) {
339 iassert(src1->array.offset < (1 << 10));
340 cat3->rel1.src1 = reg(src1, info, instr->repeat,
341 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
342 IR3_REG_HALF | absneg);
343 cat3->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
344 cat3->rel1.src1_rel = 1;
345 } else if (src1->flags & IR3_REG_CONST) {
346 iassert(src1->num < (1 << 12));
347 cat3->c1.src1 = reg(src1, info, instr->repeat,
348 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
349 cat3->c1.src1_c = 1;
350 } else {
351 iassert(src1->num < (1 << 11));
352 cat3->src1 = reg(src1, info, instr->repeat,
353 IR3_REG_R | IR3_REG_HALF | absneg);
354 }
355
356 cat3->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
357
358 cat3->src2 = reg(src2, info, instr->repeat,
359 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
360 cat3->src2_c = !!(src2->flags & IR3_REG_CONST);
361 cat3->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
362
363 if (src3->flags & IR3_REG_RELATIV) {
364 iassert(src3->array.offset < (1 << 10));
365 cat3->rel2.src3 = reg(src3, info, instr->repeat,
366 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
367 IR3_REG_HALF | absneg);
368 cat3->rel2.src3_c = !!(src3->flags & IR3_REG_CONST);
369 cat3->rel2.src3_rel = 1;
370 } else if (src3->flags & IR3_REG_CONST) {
371 iassert(src3->num < (1 << 12));
372 cat3->c2.src3 = reg(src3, info, instr->repeat,
373 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
374 cat3->c2.src3_c = 1;
375 } else {
376 iassert(src3->num < (1 << 11));
377 cat3->src3 = reg(src3, info, instr->repeat,
378 IR3_REG_R | IR3_REG_HALF | absneg);
379 }
380
381 cat3->src3_neg = !!(src3->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
382 cat3->src3_r = !!(src3->flags & IR3_REG_R);
383
384 cat3->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
385 cat3->repeat = instr->repeat;
386 cat3->sat = !!(instr->flags & IR3_INSTR_SAT);
387 cat3->ss = !!(instr->flags & IR3_INSTR_SS);
388 cat3->ul = !!(instr->flags & IR3_INSTR_UL);
389 cat3->dst_half = !!((src_flags ^ dst->flags) & IR3_REG_HALF);
390 cat3->opc = instr->opc;
391 cat3->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
392 cat3->sync = !!(instr->flags & IR3_INSTR_SY);
393 cat3->opc_cat = 3;
394
395 return 0;
396 }
397
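/* encode a category 4 (SFU, ie. single-src transcendental ops like rcp/rsq/log2) instruction: */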
398 static int emit_cat4(struct ir3_instruction *instr, void *ptr,
399 struct ir3_info *info)
400 {
401 struct ir3_register *dst = instr->regs[0];
402 struct ir3_register *src = instr->regs[1];
403 instr_cat4_t *cat4 = ptr;
404
405 iassert(instr->regs_count == 2);
406
407 if (src->flags & IR3_REG_RELATIV) {
408 iassert(src->array.offset < (1 << 10));
409 cat4->rel.src = reg(src, info, instr->repeat,
410 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_FNEG |
411 IR3_REG_FABS | IR3_REG_R | IR3_REG_HALF);
412 cat4->rel.src_c = !!(src->flags & IR3_REG_CONST);
413 cat4->rel.src_rel = 1;
414 } else if (src->flags & IR3_REG_CONST) {
415 iassert(src->num < (1 << 12));
416 cat4->c.src = reg(src, info, instr->repeat,
417 IR3_REG_CONST | IR3_REG_FNEG | IR3_REG_FABS |
418 IR3_REG_R | IR3_REG_HALF);
419 cat4->c.src_c = 1;
420 } else {
421 iassert(src->num < (1 << 11));
422 cat4->src = reg(src, info, instr->repeat,
423 IR3_REG_IMMED | IR3_REG_FNEG | IR3_REG_FABS |
424 IR3_REG_R | IR3_REG_HALF);
425 }
426
427 cat4->src_im = !!(src->flags & IR3_REG_IMMED);
428 cat4->src_neg = !!(src->flags & IR3_REG_FNEG);
429 cat4->src_abs = !!(src->flags & IR3_REG_FABS);
430 cat4->src_r = !!(src->flags & IR3_REG_R);
431
432 cat4->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
433 cat4->repeat = instr->repeat;
434 cat4->sat = !!(instr->flags & IR3_INSTR_SAT);
435 cat4->ss = !!(instr->flags & IR3_INSTR_SS);
436 cat4->ul = !!(instr->flags & IR3_INSTR_UL);
437 cat4->dst_half = !!((src->flags ^ dst->flags) & IR3_REG_HALF);
438 cat4->full = ! (src->flags & IR3_REG_HALF);
439 cat4->opc = instr->opc;
440 cat4->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
441 cat4->sync = !!(instr->flags & IR3_INSTR_SY);
442 cat4->opc_cat = 4;
443
444 return 0;
445 }
446
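/* encode a category 5 (texture sample/fetch) instruction: */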
447 static int emit_cat5(struct ir3_instruction *instr, void *ptr,
448 struct ir3_info *info)
449 {
450 struct ir3_register *dst = instr->regs[0];
451 /* To simplify things when there could be zero, one, or two args other
452 * than tex/sampler idx, we use the first src reg in the ir to hold
453 * samp_tex hvec2:
454 */
455 struct ir3_register *src1;
456 struct ir3_register *src2;
457 instr_cat5_t *cat5 = ptr;
458
459 iassert((instr->regs_count == 1) ||
460 (instr->regs_count == 2) ||
461 (instr->regs_count == 3) ||
462 (instr->regs_count == 4));
463
464 if (instr->flags & IR3_INSTR_S2EN) {
465 src1 = instr->regs[2];
466 src2 = instr->regs_count > 3 ? instr->regs[3] : NULL;
467 } else {
468 src1 = instr->regs_count > 1 ? instr->regs[1] : NULL;
469 src2 = instr->regs_count > 2 ? instr->regs[2] : NULL;
470 }
471
472 assume(src1 || !src2);
473
474 if (src1) {
475 cat5->full = ! (src1->flags & IR3_REG_HALF);
476 cat5->src1 = reg(src1, info, instr->repeat, IR3_REG_HALF);
477 }
478
479 if (src2) {
480 iassert(!((src1->flags ^ src2->flags) & IR3_REG_HALF));
481 cat5->src2 = reg(src2, info, instr->repeat, IR3_REG_HALF);
482 }
483
484 if (instr->flags & IR3_INSTR_B) {
485 cat5->s2en_bindless.base_hi = instr->cat5.tex_base >> 1;
486 cat5->base_lo = instr->cat5.tex_base & 1;
487 }
488
489 if (instr->flags & IR3_INSTR_S2EN) {
490 struct ir3_register *samp_tex = instr->regs[1];
491 cat5->s2en_bindless.src3 = reg(samp_tex, info, instr->repeat,
492 (instr->flags & IR3_INSTR_B) ? 0 : IR3_REG_HALF);
493 if (instr->flags & IR3_INSTR_B) {
494 if (instr->flags & IR3_INSTR_A1EN) {
495 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_UNIFORM;
496 } else {
497 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_UNIFORM;
498 }
499 } else {
500 /* TODO: This should probably be CAT5_UNIFORM, at least on a6xx,
501 * as this is what the blob does and it is presumably faster, but
502 * first we should confirm it is actually nonuniform and figure
503 * out when the whole descriptor mode mechanism was introduced.
504 */
505 cat5->s2en_bindless.desc_mode = CAT5_NONUNIFORM;
506 }
507 iassert(!(instr->cat5.samp | instr->cat5.tex));
508 } else if (instr->flags & IR3_INSTR_B) {
509 cat5->s2en_bindless.src3 = instr->cat5.samp;
510 if (instr->flags & IR3_INSTR_A1EN) {
511 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_IMM;
512 } else {
513 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_IMM;
514 }
515 } else {
516 cat5->norm.samp = instr->cat5.samp;
517 cat5->norm.tex = instr->cat5.tex;
518 }
519
520 cat5->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
521 cat5->wrmask = dst->wrmask;
522 cat5->type = instr->cat5.type;
523 cat5->is_3d = !!(instr->flags & IR3_INSTR_3D);
524 cat5->is_a = !!(instr->flags & IR3_INSTR_A);
525 cat5->is_s = !!(instr->flags & IR3_INSTR_S);
526 cat5->is_s2en_bindless = !!(instr->flags & (IR3_INSTR_S2EN | IR3_INSTR_B));
527 cat5->is_o = !!(instr->flags & IR3_INSTR_O);
528 cat5->is_p = !!(instr->flags & IR3_INSTR_P);
529 cat5->opc = instr->opc;
530 cat5->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
531 cat5->sync = !!(instr->flags & IR3_INSTR_SY);
532 cat5->opc_cat = 5;
533
534 return 0;
535 }
536
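/* encode the newer a6xx+ form of cat6 load/store/atomic instructions; see
 * emit_cat6() below for which opcodes are routed here:
 */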
537 static int emit_cat6_a6xx(struct ir3_instruction *instr, void *ptr,
538 struct ir3_info *info)
539 {
540 struct ir3_register *ssbo;
541 instr_cat6_a6xx_t *cat6 = ptr;
542
543 ssbo = instr->regs[1];
544
545 cat6->type = instr->cat6.type;
546 cat6->d = instr->cat6.d - (instr->opc == OPC_LDC ? 0 : 1);
547 cat6->typed = instr->cat6.typed;
548 cat6->type_size = instr->cat6.iim_val - 1;
549 cat6->opc = instr->opc;
550 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
551 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
552 cat6->opc_cat = 6;
553
554 cat6->ssbo = reg(ssbo, info, instr->repeat, IR3_REG_IMMED);
555
556 /* For unused sources in an opcode, initialize contents with the ir3 dest
557 * reg
558 */
559 switch (instr->opc) {
560 case OPC_RESINFO:
561 cat6->src1 = reg(instr->regs[0], info, instr->repeat, 0);
562 cat6->src2 = reg(instr->regs[0], info, instr->repeat, 0);
563 break;
564 case OPC_LDC:
565 case OPC_LDIB:
566 cat6->src1 = reg(instr->regs[2], info, instr->repeat, 0);
567 cat6->src2 = reg(instr->regs[0], info, instr->repeat, 0);
568 break;
569 default:
570 cat6->src1 = reg(instr->regs[2], info, instr->repeat, 0);
571 cat6->src2 = reg(instr->regs[3], info, instr->repeat, 0);
572 break;
573 }
574
575 if (instr->flags & IR3_INSTR_B) {
576 if (ssbo->flags & IR3_REG_IMMED) {
577 cat6->desc_mode = CAT6_BINDLESS_IMM;
578 } else {
579 cat6->desc_mode = CAT6_BINDLESS_UNIFORM;
580 }
581 cat6->base = instr->cat6.base;
582 } else {
583 if (ssbo->flags & IR3_REG_IMMED)
584 cat6->desc_mode = CAT6_IMM;
585 else
586 cat6->desc_mode = CAT6_UNIFORM;
587 }
588
589 switch (instr->opc) {
590 case OPC_ATOMIC_ADD:
591 case OPC_ATOMIC_SUB:
592 case OPC_ATOMIC_XCHG:
593 case OPC_ATOMIC_INC:
594 case OPC_ATOMIC_DEC:
595 case OPC_ATOMIC_CMPXCHG:
596 case OPC_ATOMIC_MIN:
597 case OPC_ATOMIC_MAX:
598 case OPC_ATOMIC_AND:
599 case OPC_ATOMIC_OR:
600 case OPC_ATOMIC_XOR:
601 cat6->pad1 = 0x1;
602 cat6->pad3 = 0xc;
603 cat6->pad5 = 0x3;
604 break;
605 case OPC_STIB:
606 cat6->pad1 = 0x0;
607 cat6->pad3 = 0xc;
608 cat6->pad5 = 0x2;
609 break;
610 case OPC_LDIB:
611 case OPC_RESINFO:
612 cat6->pad1 = 0x1;
613 cat6->pad3 = 0xc;
614 cat6->pad5 = 0x2;
615 break;
616 case OPC_LDC:
617 cat6->pad1 = 0x0;
618 cat6->pad3 = 0x8;
619 cat6->pad5 = 0x2;
620 break;
621 default:
622 iassert(0);
623 }
624 cat6->pad2 = 0x0;
625 cat6->pad4 = 0x0;
626
627 return 0;
628 }
629
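/* encode a category 6 (memory access: loads, stores, atomics) instruction: */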
630 static int emit_cat6(struct ir3_instruction *instr, void *ptr,
631 struct ir3_info *info)
632 {
633 struct ir3_shader_variant *v = info->data;
634 struct ir3_register *dst, *src1, *src2;
635 instr_cat6_t *cat6 = ptr;
636
637 /* In a6xx we start using a new instruction encoding for some of
638 * these instructions:
639 */
640 if (v->shader->compiler->gpu_id >= 600) {
641 switch (instr->opc) {
642 case OPC_ATOMIC_ADD:
643 case OPC_ATOMIC_SUB:
644 case OPC_ATOMIC_XCHG:
645 case OPC_ATOMIC_INC:
646 case OPC_ATOMIC_DEC:
647 case OPC_ATOMIC_CMPXCHG:
648 case OPC_ATOMIC_MIN:
649 case OPC_ATOMIC_MAX:
650 case OPC_ATOMIC_AND:
651 case OPC_ATOMIC_OR:
652 case OPC_ATOMIC_XOR:
653 /* The shared variants of these still use the old encoding: */
654 if (!(instr->flags & IR3_INSTR_G))
655 break;
656 /* fallthrough */
657 case OPC_STIB:
658 case OPC_LDIB:
659 case OPC_LDC:
660 case OPC_RESINFO:
661 return emit_cat6_a6xx(instr, ptr, info);
662 default:
663 break;
664 }
665 }
666
667 bool type_full = type_size(instr->cat6.type) == 32;
668
669 cat6->type = instr->cat6.type;
670 cat6->opc = instr->opc;
671 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
672 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
673 cat6->g = !!(instr->flags & IR3_INSTR_G);
674 cat6->opc_cat = 6;
675
676 switch (instr->opc) {
677 case OPC_RESINFO:
678 case OPC_RESFMT:
679 iassert_type(instr->regs[0], type_full); /* dst */
680 iassert_type(instr->regs[1], type_full); /* src1 */
681 break;
682 case OPC_L2G:
683 case OPC_G2L:
684 iassert_type(instr->regs[0], true); /* dst */
685 iassert_type(instr->regs[1], true); /* src1 */
686 break;
687 case OPC_STG:
688 case OPC_STL:
689 case OPC_STP:
690 case OPC_STLW:
691 case OPC_STIB:
692 /* no dst, so regs[0] is dummy */
693 iassert_type(instr->regs[1], true); /* dst */
694 iassert_type(instr->regs[2], type_full); /* src1 */
695 iassert_type(instr->regs[3], true); /* src2 */
696 break;
697 default:
698 iassert_type(instr->regs[0], type_full); /* dst */
699 iassert_type(instr->regs[1], true); /* src1 */
700 if (instr->regs_count > 2)
701 iassert_type(instr->regs[2], true); /* src2 */
702 break;
703 }
704
705 /* the "dst" for a store instruction is (from the perspective
706 * of data flow in the shader, ie. register use/def, etc) in
707 * fact a register that is read by the instruction, rather
708 * than written:
709 */
710 if (is_store(instr)) {
711 iassert(instr->regs_count >= 3);
712
713 dst = instr->regs[1];
714 src1 = instr->regs[2];
715 src2 = (instr->regs_count >= 4) ? instr->regs[3] : NULL;
716 } else {
717 iassert(instr->regs_count >= 2);
718
719 dst = instr->regs[0];
720 src1 = instr->regs[1];
721 src2 = (instr->regs_count >= 3) ? instr->regs[2] : NULL;
722 }
723
724 /* TODO we need a more comprehensive list of which instructions
725 * can be encoded which way. Or possibly use IR3_INSTR_0 flag to
726 * indicate to use the src_off encoding even if offset is zero
727 * (but then what to do about dst_off?)
728 */
729 if (is_atomic(instr->opc)) {
730 instr_cat6ldgb_t *ldgb = ptr;
731
732 /* maybe these two bits both determine the instruction encoding? */
733 cat6->src_off = false;
734
735 ldgb->d = instr->cat6.d - 1;
736 ldgb->typed = instr->cat6.typed;
737 ldgb->type_size = instr->cat6.iim_val - 1;
738
739 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
740
741 if (ldgb->g) {
742 struct ir3_register *src3 = instr->regs[3];
743 struct ir3_register *src4 = instr->regs[4];
744
745 /* first src is src_ssbo: */
746 iassert(src1->flags & IR3_REG_IMMED);
747 ldgb->src_ssbo = src1->uim_val;
748 ldgb->src_ssbo_im = 0x1;
749
750 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
751 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
752 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
753 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
754
755 ldgb->src3 = reg(src4, info, instr->repeat, 0);
756 ldgb->pad0 = 0x1;
757 } else {
758 ldgb->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
759 ldgb->src1_im = !!(src1->flags & IR3_REG_IMMED);
760 ldgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
761 ldgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
762 ldgb->pad0 = 0x1;
763 ldgb->src_ssbo_im = 0x0;
764 }
765
766 return 0;
767 } else if (instr->opc == OPC_LDGB) {
768 struct ir3_register *src3 = instr->regs[3];
769 instr_cat6ldgb_t *ldgb = ptr;
770
771 /* maybe these two bits both determine the instruction encoding? */
772 cat6->src_off = false;
773
774 ldgb->d = instr->cat6.d - 1;
775 ldgb->typed = instr->cat6.typed;
776 ldgb->type_size = instr->cat6.iim_val - 1;
777
778 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
779
780 /* first src is src_ssbo: */
781 iassert(src1->flags & IR3_REG_IMMED);
782 ldgb->src_ssbo = src1->uim_val;
783
784 /* then next two are src1/src2: */
785 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
786 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
787 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
788 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
789
790 ldgb->pad0 = 0x0;
791 ldgb->src_ssbo_im = true;
792
793 return 0;
794 } else if (instr->opc == OPC_RESINFO) {
795 instr_cat6ldgb_t *ldgb = ptr;
796
797 ldgb->d = instr->cat6.d - 1;
798
799 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
800
801 /* first src is src_ssbo: */
802 ldgb->src_ssbo = reg(src1, info, instr->repeat, IR3_REG_IMMED);
803 ldgb->src_ssbo_im = !!(src1->flags & IR3_REG_IMMED);
804
805 return 0;
806 } else if ((instr->opc == OPC_STGB) || (instr->opc == OPC_STIB)) {
807 struct ir3_register *src3 = instr->regs[4];
808 instr_cat6stgb_t *stgb = ptr;
809
810 /* maybe these two bits both determine the instruction encoding? */
811 cat6->src_off = true;
812 stgb->pad3 = 0x2;
813
814 stgb->d = instr->cat6.d - 1;
815 stgb->typed = instr->cat6.typed;
816 stgb->type_size = instr->cat6.iim_val - 1;
817
818 /* first src is dst_ssbo: */
819 iassert(dst->flags & IR3_REG_IMMED);
820 stgb->dst_ssbo = dst->uim_val;
821
822 /* then src1/src2/src3: */
823 stgb->src1 = reg(src1, info, instr->repeat, 0);
824 stgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
825 stgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
826 stgb->src3 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
827 stgb->src3_im = !!(src3->flags & IR3_REG_IMMED);
828
829 return 0;
830 } else if (instr->cat6.src_offset || (instr->opc == OPC_LDG) ||
831 (instr->opc == OPC_LDL) || (instr->opc == OPC_LDLW)) {
832 struct ir3_register *src3 = instr->regs[3];
833 instr_cat6a_t *cat6a = ptr;
834
835 cat6->src_off = true;
836
837 if (instr->opc == OPC_LDG) {
838 /* For LDG src1 cannot be immediate, so src1_im is redundant and is
839 * instead used to signal whether 'off' is a 32 bit register (when
840 * true) or an immediate offset.
841 */
842 cat6a->src1 = reg(src1, info, instr->repeat, 0);
843 cat6a->src1_im = !(src3->flags & IR3_REG_IMMED);
844 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
845 } else {
846 cat6a->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
847 cat6a->src1_im = !!(src1->flags & IR3_REG_IMMED);
848 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
849 iassert(src3->flags & IR3_REG_IMMED);
850 }
851
852 /* Num components */
853 cat6a->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
854 cat6a->src2_im = true;
855 } else {
856 instr_cat6b_t *cat6b = ptr;
857
858 cat6->src_off = false;
859
860 cat6b->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED | IR3_REG_HALF);
861 cat6b->src1_im = !!(src1->flags & IR3_REG_IMMED);
862 if (src2) {
863 cat6b->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
864 cat6b->src2_im = !!(src2->flags & IR3_REG_IMMED);
865 }
866 }
867
868 if (instr->cat6.dst_offset || (instr->opc == OPC_STG) ||
869 (instr->opc == OPC_STL) || (instr->opc == OPC_STLW)) {
870 instr_cat6c_t *cat6c = ptr;
871 cat6->dst_off = true;
872 cat6c->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
873
874 if (instr->flags & IR3_INSTR_G) {
875 struct ir3_register *src3 = instr->regs[4];
876 cat6c->off = reg(src3, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
877 if (src3->flags & IR3_REG_IMMED) {
878 /* Immediate offsets are in bytes... */
879 cat6->g = false;
880 cat6c->off *= 4;
881 }
882 } else {
883 cat6c->off = instr->cat6.dst_offset;
884 cat6c->off_high = instr->cat6.dst_offset >> 8;
885 }
886 } else {
887 instr_cat6d_t *cat6d = ptr;
888 cat6->dst_off = false;
889 cat6d->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
890 }
891
892 return 0;
893 }
894
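/* encode a category 7 (barrier/fence) instruction: */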
895 static int emit_cat7(struct ir3_instruction *instr, void *ptr,
896 struct ir3_info *info)
897 {
898 instr_cat7_t *cat7 = ptr;
899
900 cat7->ss = !!(instr->flags & IR3_INSTR_SS);
901 cat7->w = instr->cat7.w;
902 cat7->r = instr->cat7.r;
903 cat7->l = instr->cat7.l;
904 cat7->g = instr->cat7.g;
905 cat7->opc = instr->opc;
906 cat7->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
907 cat7->sync = !!(instr->flags & IR3_INSTR_SY);
908 cat7->opc_cat = 7;
909
910 return 0;
911 }
912
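/* table of per-category encoders, indexed by instruction category: */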
913 static int (*emit[])(struct ir3_instruction *instr, void *ptr,
914 struct ir3_info *info) = {
915 emit_cat0, emit_cat1, emit_cat2, emit_cat3, emit_cat4, emit_cat5, emit_cat6,
916 emit_cat7,
917 };
918
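/* assemble the shader into its binary form, two dwords (64 bits) per
 * instruction, accumulating statistics (instruction/nop/mov counts, max
 * register and const usage, (ss)/(sy) counts) into v->info:
 */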
919 void * ir3_assemble(struct ir3_shader_variant *v)
920 {
921 uint32_t *ptr, *dwords;
922 struct ir3_info *info = &v->info;
923 struct ir3 *shader = v->ir;
924
925 memset(info, 0, sizeof(*info));
926 info->data = v;
927 info->max_reg = -1;
928 info->max_half_reg = -1;
929 info->max_const = -1;
930
931 foreach_block (block, &shader->block_list) {
932 foreach_instr (instr, &block->instr_list) {
933 info->sizedwords += 2;
934 }
935 }
936
937 /* need an integer number of instruction "groups" (sets of 16
938 * instructions on a4xx or sets of 4 instructions on a3xx),
939 * so pad out w/ NOPs if needed: (NOTE each instruction is 64bits)
940 */
941 if (v->shader->compiler->gpu_id >= 400) {
942 info->sizedwords = align(info->sizedwords, 16 * 2);
943 } else {
944 info->sizedwords = align(info->sizedwords, 4 * 2);
945 }
946
947 ptr = dwords = rzalloc_size(v, 4 * info->sizedwords);
948
949 foreach_block (block, &shader->block_list) {
950 unsigned sfu_delay = 0;
951
952 foreach_instr (instr, &block->instr_list) {
953 int ret = emit[opc_cat(instr->opc)](instr, dwords, info);
954 if (ret)
955 goto fail;
956
957 if ((instr->opc == OPC_BARY_F) && (instr->regs[0]->flags & IR3_REG_EI))
958 info->last_baryf = info->instrs_count;
959
960 info->instrs_count += 1 + instr->repeat + instr->nop;
961 info->nops_count += instr->nop;
962 if (instr->opc == OPC_NOP)
963 info->nops_count += 1 + instr->repeat;
964 if (instr->opc == OPC_MOV) {
965 if (instr->cat1.src_type == instr->cat1.dst_type) {
966 info->mov_count += 1 + instr->repeat;
967 } else {
968 info->cov_count += 1 + instr->repeat;
969 }
970 }
971 dwords += 2;
972
973 if (instr->flags & IR3_INSTR_SS) {
974 info->ss++;
975 info->sstall += sfu_delay;
976 }
977
978 if (instr->flags & IR3_INSTR_SY)
979 info->sy++;
980
981 if (is_sfu(instr)) {
982 sfu_delay = 10;
983 } else if (sfu_delay > 0) {
984 sfu_delay--;
985 }
986 }
987 }
988
989 return ptr;
990
991 fail:
992 ralloc_free(ptr);
993 return NULL;
994 }
995
996 static struct ir3_register * reg_create(struct ir3 *shader,
997 int num, int flags)
998 {
999 struct ir3_register *reg =
1000 ir3_alloc(shader, sizeof(struct ir3_register));
1001 reg->wrmask = 1;
1002 reg->flags = flags;
1003 reg->num = num;
1004 return reg;
1005 }
1006
1007 static void insert_instr(struct ir3_block *block,
1008 struct ir3_instruction *instr)
1009 {
1010 struct ir3 *shader = block->shader;
1011 #ifdef DEBUG
1012 instr->serialno = ++shader->instr_count;
1013 #endif
1014 list_addtail(&instr->node, &block->instr_list);
1015
1016 if (is_input(instr))
1017 array_insert(shader, shader->baryfs, instr);
1018 }
1019
1020 struct ir3_block * ir3_block_create(struct ir3 *shader)
1021 {
1022 struct ir3_block *block = ir3_alloc(shader, sizeof(*block));
1023 #ifdef DEBUG
1024 block->serialno = ++shader->block_count;
1025 #endif
1026 block->shader = shader;
1027 list_inithead(&block->node);
1028 list_inithead(&block->instr_list);
1029 block->predecessors = _mesa_pointer_set_create(block);
1030 return block;
1031 }
1032
1033 static struct ir3_instruction *instr_create(struct ir3_block *block, int nreg)
1034 {
1035 struct ir3_instruction *instr;
1036 unsigned sz = sizeof(*instr) + (nreg * sizeof(instr->regs[0]));
1037 char *ptr = ir3_alloc(block->shader, sz);
1038
1039 instr = (struct ir3_instruction *)ptr;
1040 ptr += sizeof(*instr);
1041 instr->regs = (struct ir3_register **)ptr;
1042
1043 #ifdef DEBUG
1044 instr->regs_max = nreg;
1045 #endif
1046
1047 return instr;
1048 }
1049
1050 struct ir3_instruction * ir3_instr_create2(struct ir3_block *block,
1051 opc_t opc, int nreg)
1052 {
1053 struct ir3_instruction *instr = instr_create(block, nreg);
1054 instr->block = block;
1055 instr->opc = opc;
1056 insert_instr(block, instr);
1057 return instr;
1058 }
1059
1060 struct ir3_instruction * ir3_instr_create(struct ir3_block *block, opc_t opc)
1061 {
1062 /* NOTE: we could be slightly more clever, at least for non-meta,
1063 * and choose # of regs based on category.
1064 */
1065 return ir3_instr_create2(block, opc, 4);
1066 }
1067
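/* make a copy of an instruction (including copies of its registers),
 * appended to the end of the same block:
 */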
1068 struct ir3_instruction * ir3_instr_clone(struct ir3_instruction *instr)
1069 {
1070 struct ir3_instruction *new_instr = instr_create(instr->block,
1071 instr->regs_count);
1072 struct ir3_register **regs;
1073 unsigned i;
1074
1075 regs = new_instr->regs;
1076 *new_instr = *instr;
1077 new_instr->regs = regs;
1078
1079 insert_instr(instr->block, new_instr);
1080
1081 /* clone registers: */
1082 new_instr->regs_count = 0;
1083 for (i = 0; i < instr->regs_count; i++) {
1084 struct ir3_register *reg = instr->regs[i];
1085 struct ir3_register *new_reg =
1086 ir3_reg_create(new_instr, reg->num, reg->flags);
1087 *new_reg = *reg;
1088 }
1089
1090 return new_instr;
1091 }
1092
1093 /* Add a false dependency to an instruction, to ensure the dependency is scheduled before it: */
1094 void ir3_instr_add_dep(struct ir3_instruction *instr, struct ir3_instruction *dep)
1095 {
1096 array_insert(instr, instr->deps, dep);
1097 }
1098
1099 struct ir3_register * ir3_reg_create(struct ir3_instruction *instr,
1100 int num, int flags)
1101 {
1102 struct ir3 *shader = instr->block->shader;
1103 struct ir3_register *reg = reg_create(shader, num, flags);
1104 #ifdef DEBUG
1105 debug_assert(instr->regs_count < instr->regs_max);
1106 #endif
1107 instr->regs[instr->regs_count++] = reg;
1108 return reg;
1109 }
1110
1111 struct ir3_register * ir3_reg_clone(struct ir3 *shader,
1112 struct ir3_register *reg)
1113 {
1114 struct ir3_register *new_reg = reg_create(shader, 0, 0);
1115 *new_reg = *reg;
1116 return new_reg;
1117 }
1118
1119 void
1120 ir3_instr_set_address(struct ir3_instruction *instr,
1121 struct ir3_instruction *addr)
1122 {
1123 if (instr->address != addr) {
1124 struct ir3 *ir = instr->block->shader;
1125
1126 debug_assert(!instr->address);
1127 debug_assert(instr->block == addr->block);
1128
1129 instr->address = addr;
1130 debug_assert(reg_num(addr->regs[0]) == REG_A0);
1131 unsigned comp = reg_comp(addr->regs[0]);
1132 if (comp == 0) {
1133 array_insert(ir, ir->a0_users, instr);
1134 } else {
1135 debug_assert(comp == 1);
1136 array_insert(ir, ir->a1_users, instr);
1137 }
1138 }
1139 }
1140
1141 void
1142 ir3_block_clear_mark(struct ir3_block *block)
1143 {
1144 foreach_instr (instr, &block->instr_list)
1145 instr->flags &= ~IR3_INSTR_MARK;
1146 }
1147
1148 void
1149 ir3_clear_mark(struct ir3 *ir)
1150 {
1151 foreach_block (block, &ir->block_list) {
1152 ir3_block_clear_mark(block);
1153 }
1154 }
1155
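/* assign sequential ip's to each instruction (and start/end ip's to each
 * block), returning the total count:
 */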
1156 unsigned
1157 ir3_count_instructions(struct ir3 *ir)
1158 {
1159 unsigned cnt = 1;
1160 foreach_block (block, &ir->block_list) {
1161 block->start_ip = cnt;
1162 foreach_instr (instr, &block->instr_list) {
1163 instr->ip = cnt++;
1164 }
1165 block->end_ip = cnt;
1166 }
1167 return cnt;
1168 }
1169
1170 /* When counting instructions for RA, we insert extra fake instructions at the
1171 * beginning of each block, where values become live, and at the end where
1172 * values die. This prevents problems where values live-in at the beginning or
1173 * live-out at the end of a block from being treated as if they were
1174 * live-in/live-out at the first/last instruction, which would be incorrect.
1175 * In ir3_legalize these ip's are assumed to be actual ip's of the final
1176 * program, so it would be incorrect to use this everywhere.
1177 */
1178
1179 unsigned
1180 ir3_count_instructions_ra(struct ir3 *ir)
1181 {
1182 unsigned cnt = 1;
1183 foreach_block (block, &ir->block_list) {
1184 block->start_ip = cnt++;
1185 foreach_instr (instr, &block->instr_list) {
1186 instr->ip = cnt++;
1187 }
1188 block->end_ip = cnt++;
1189 }
1190 return cnt;
1191 }
1192
1193 struct ir3_array *
1194 ir3_lookup_array(struct ir3 *ir, unsigned id)
1195 {
1196 foreach_array (arr, &ir->array_list)
1197 if (arr->id == id)
1198 return arr;
1199 return NULL;
1200 }
1201
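/* for each instruction, build the set of instructions which use its
 * result (instr->uses), optionally also following false dependencies:
 */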
1202 void
1203 ir3_find_ssa_uses(struct ir3 *ir, void *mem_ctx, bool falsedeps)
1204 {
1205 /* We could do this in a single pass if we could assume instructions
1206 * are always sorted, which currently might not always be true.
1207 * (In particular after ir3_group pass, but maybe other places.)
1208 */
1209 foreach_block (block, &ir->block_list)
1210 foreach_instr (instr, &block->instr_list)
1211 instr->uses = NULL;
1212
1213 foreach_block (block, &ir->block_list) {
1214 foreach_instr (instr, &block->instr_list) {
1215 foreach_ssa_src_n (src, n, instr) {
1216 if (__is_false_dep(instr, n) && !falsedeps)
1217 continue;
1218 if (!src->uses)
1219 src->uses = _mesa_pointer_set_create(mem_ctx);
1220 _mesa_set_add(src->uses, instr);
1221 }
1222 }
1223 }
1224 }
1225
1226 /**
1227 * Set the destination type of an instruction, for example if a
1228 * conversion is folded in, handling the special cases where the
1229 * instruction's dest type or opcode needs to be fixed up.
1230 */
1231 void
1232 ir3_set_dst_type(struct ir3_instruction *instr, bool half)
1233 {
1234 if (half) {
1235 instr->regs[0]->flags |= IR3_REG_HALF;
1236 } else {
1237 instr->regs[0]->flags &= ~IR3_REG_HALF;
1238 }
1239
1240 switch (opc_cat(instr->opc)) {
1241 case 1: /* move instructions */
1242 if (half) {
1243 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
1244 } else {
1245 instr->cat1.dst_type = full_type(instr->cat1.dst_type);
1246 }
1247 break;
1248 case 4:
1249 if (half) {
1250 instr->opc = cat4_half_opc(instr->opc);
1251 } else {
1252 instr->opc = cat4_full_opc(instr->opc);
1253 }
1254 break;
1255 case 5:
1256 if (half) {
1257 instr->cat5.type = half_type(instr->cat5.type);
1258 } else {
1259 instr->cat5.type = full_type(instr->cat5.type);
1260 }
1261 break;
1262 }
1263 }
1264
1265 /**
1266 * One-time fixup for instruction src-types. Other than cov's that
1267 * are folded, an instruction's src type does not change.
1268 */
1269 void
1270 ir3_fixup_src_type(struct ir3_instruction *instr)
1271 {
1272 bool half = !!(instr->regs[1]->flags & IR3_REG_HALF);
1273
1274 switch (opc_cat(instr->opc)) {
1275 case 1: /* move instructions */
1276 if (half) {
1277 instr->cat1.src_type = half_type(instr->cat1.src_type);
1278 } else {
1279 instr->cat1.src_type = full_type(instr->cat1.src_type);
1280 }
1281 break;
1282 case 3:
1283 if (half) {
1284 instr->opc = cat3_half_opc(instr->opc);
1285 } else {
1286 instr->opc = cat3_full_opc(instr->opc);
1287 }
1288 break;
1289 }
1290 }
1291
1292 static unsigned
1293 cp_flags(unsigned flags)
1294 {
1295 /* only considering these flags (at least for now): */
1296 flags &= (IR3_REG_CONST | IR3_REG_IMMED |
1297 IR3_REG_FNEG | IR3_REG_FABS |
1298 IR3_REG_SNEG | IR3_REG_SABS |
1299 IR3_REG_BNOT | IR3_REG_RELATIV);
1300 return flags;
1301 }
1302
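/* check whether a src with the given flags (const/immed/abs/neg/relativ/
 * etc) would be legal as the n'th src of instr, eg. used by copy
 * propagation to decide whether a value can be folded into a src slot:
 */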
1303 bool
1304 ir3_valid_flags(struct ir3_instruction *instr, unsigned n,
1305 unsigned flags)
1306 {
1307 struct ir3_compiler *compiler = instr->block->shader->compiler;
1308 unsigned valid_flags;
1309
1310 if ((flags & IR3_REG_HIGH) &&
1311 (opc_cat(instr->opc) > 1) &&
1312 (compiler->gpu_id >= 600))
1313 return false;
1314
1315 flags = cp_flags(flags);
1316
1317 /* If destination is indirect, then source cannot be.. at least
1318 * I don't think so..
1319 */
1320 if ((instr->regs[0]->flags & IR3_REG_RELATIV) &&
1321 (flags & IR3_REG_RELATIV))
1322 return false;
1323
1324 if (flags & IR3_REG_RELATIV) {
1325 /* TODO need to test on earlier gens.. pretty sure the earlier
1326 * problem was just that we didn't check that the src was from
1327 * same block (since we can't propagate address register values
1328 * across blocks currently)
1329 */
1330 if (compiler->gpu_id < 600)
1331 return false;
1332
1333 /* NOTE in the special try_swap_mad_two_srcs() case we can be
1334 * called on a src that has already had an indirect load folded
1335 * in, in which case ssa() returns NULL
1336 */
1337 if (instr->regs[n+1]->flags & IR3_REG_SSA) {
1338 struct ir3_instruction *src = ssa(instr->regs[n+1]);
1339 if (src->address->block != instr->block)
1340 return false;
1341 }
1342 }
1343
1344 switch (opc_cat(instr->opc)) {
1345 case 1:
1346 valid_flags = IR3_REG_IMMED | IR3_REG_CONST | IR3_REG_RELATIV;
1347 if (flags & ~valid_flags)
1348 return false;
1349 break;
1350 case 2:
1351 valid_flags = ir3_cat2_absneg(instr->opc) |
1352 IR3_REG_CONST | IR3_REG_RELATIV;
1353
1354 if (ir3_cat2_int(instr->opc))
1355 valid_flags |= IR3_REG_IMMED;
1356
1357 if (flags & ~valid_flags)
1358 return false;
1359
1360 if (flags & (IR3_REG_CONST | IR3_REG_IMMED)) {
1361 unsigned m = (n ^ 1) + 1;
1362 /* cannot deal w/ const in both srcs:
1363 * (note that some cat2 actually only have a single src)
1364 */
1365 if (m < instr->regs_count) {
1366 struct ir3_register *reg = instr->regs[m];
1367 if ((flags & IR3_REG_CONST) && (reg->flags & IR3_REG_CONST))
1368 return false;
1369 if ((flags & IR3_REG_IMMED) && (reg->flags & IR3_REG_IMMED))
1370 return false;
1371 }
1372 }
1373 break;
1374 case 3:
1375 valid_flags = ir3_cat3_absneg(instr->opc) |
1376 IR3_REG_CONST | IR3_REG_RELATIV;
1377
1378 if (flags & ~valid_flags)
1379 return false;
1380
1381 if (flags & (IR3_REG_CONST | IR3_REG_RELATIV)) {
1382 /* cannot deal w/ const/relativ in 2nd src: */
1383 if (n == 1)
1384 return false;
1385 }
1386
1387 break;
1388 case 4:
1389 /* seems like blob compiler avoids const as src.. */
1390 /* TODO double check if this is still the case on a4xx */
1391 if (flags & (IR3_REG_CONST | IR3_REG_IMMED))
1392 return false;
1393 if (flags & (IR3_REG_SABS | IR3_REG_SNEG))
1394 return false;
1395 break;
1396 case 5:
1397 /* no flags allowed */
1398 if (flags)
1399 return false;
1400 break;
1401 case 6:
1402 valid_flags = IR3_REG_IMMED;
1403 if (flags & ~valid_flags)
1404 return false;
1405
1406 if (flags & IR3_REG_IMMED) {
1407 /* doesn't seem like we can have immediate src for store
1408 * instructions:
1409 *
1410 * TODO this restriction could also apply to load instructions,
1411 * but for load instructions this arg is the address (and not
1412 * really sure of any good way to test a hard-coded immed addr src)
1413 */
1414 if (is_store(instr) && (n == 1))
1415 return false;
1416
1417 if ((instr->opc == OPC_LDL) && (n == 0))
1418 return false;
1419
1420 if ((instr->opc == OPC_STL) && (n != 2))
1421 return false;
1422
1423 if (instr->opc == OPC_STLW && n == 0)
1424 return false;
1425
1426 if (instr->opc == OPC_LDLW && n == 0)
1427 return false;
1428
1429 /* disallow immediates in anything but the SSBO slot argument for
1430 * cat6 instructions:
1431 */
1432 if (is_atomic(instr->opc) && (n != 0))
1433 return false;
1434
1435 if (is_atomic(instr->opc) && !(instr->flags & IR3_INSTR_G))
1436 return false;
1437
1438 if (instr->opc == OPC_STG && (instr->flags & IR3_INSTR_G) && (n != 2))
1439 return false;
1440
1441 /* as with atomics, these cat6 instrs can only have an immediate
1442 * for SSBO/IBO slot argument
1443 */
1444 switch (instr->opc) {
1445 case OPC_LDIB:
1446 case OPC_LDC:
1447 case OPC_RESINFO:
1448 if (n != 0)
1449 return false;
1450 break;
1451 default:
1452 break;
1453 }
1454 }
1455
1456 break;
1457 }
1458
1459 return true;
1460 }