freedreno/ir3: Fix disasm of register offsets in ldp/stp.
mesa.git: src/freedreno/ir3/ir3.c
1 /*
2 * Copyright (c) 2012 Rob Clark <robdclark@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "ir3.h"
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <string.h>
29 #include <assert.h>
30 #include <stdbool.h>
31 #include <errno.h>
32
33 #include "util/bitscan.h"
34 #include "util/ralloc.h"
35 #include "util/u_math.h"
36
37 #include "instr-a3xx.h"
38 #include "ir3_shader.h"
39
40 /* simple allocator to carve allocations out of an up-front allocated heap,
41 * so that we can free everything easily in one shot.
42 */
43 void * ir3_alloc(struct ir3 *shader, int sz)
44 {
45 return rzalloc_size(shader, sz); /* TODO: don't use rzalloc */
46 }
47
48 struct ir3 * ir3_create(struct ir3_compiler *compiler,
49 struct ir3_shader_variant *v)
50 {
51 struct ir3 *shader = rzalloc(v, struct ir3);
52
53 shader->compiler = compiler;
54 shader->type = v->type;
55
56 list_inithead(&shader->block_list);
57 list_inithead(&shader->array_list);
58
59 return shader;
60 }
61
62 void ir3_destroy(struct ir3 *shader)
63 {
64 ralloc_free(shader);
65 }
66
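/* The iassert()/iassert_type() macros below are used by the emit_catN()
 * helpers: rather than hard-asserting in release builds, a violated
 * encoding constraint makes the emitter return -1 so that ir3_assemble()
 * can fail gracefully.
 */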
67 #define iassert(cond) do { \
68 if (!(cond)) { \
69 debug_assert(cond); \
70 return -1; \
71 } } while (0)
72
73 #define iassert_type(reg, full) do { \
74 if ((full)) { \
75 iassert(!((reg)->flags & IR3_REG_HALF)); \
76 } else { \
77 iassert((reg)->flags & IR3_REG_HALF); \
78 	} } while (0)
79
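/* Encode a src/dst register and update the register-footprint stats in
 * ir3_info.  ir3 register "numbers" are scalar component ids, e.g.
 *
 *    num = (2 << 2) | 1   =>  r2.y   (num >> 2 is the register index,
 *                                     num & 0x3 is the component)
 *
 * which is why max_reg/max_const are derived from (max >> 2), i.e. they
 * count whole (vec4) registers.  Writes to the dummy register r63.x are
 * ignored for the footprint.
 */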
80 static uint32_t reg(struct ir3_register *reg, struct ir3_info *info,
81 uint32_t repeat, uint32_t valid_flags)
82 {
83 struct ir3_shader_variant *v = info->data;
84 reg_t val = { .dummy32 = 0 };
85
86 if (reg->flags & ~valid_flags) {
87 debug_printf("INVALID FLAGS: %x vs %x\n",
88 reg->flags, valid_flags);
89 }
90
91 if (!(reg->flags & IR3_REG_R))
92 repeat = 0;
93
94 if (reg->flags & IR3_REG_IMMED) {
95 val.iim_val = reg->iim_val;
96 } else {
97 unsigned components;
98 int16_t max;
99
100 if (reg->flags & IR3_REG_RELATIV) {
101 components = reg->size;
102 val.idummy10 = reg->array.offset;
103 max = (reg->array.offset + repeat + components - 1);
104 } else {
105 components = util_last_bit(reg->wrmask);
106 val.comp = reg->num & 0x3;
107 val.num = reg->num >> 2;
108 max = (reg->num + repeat + components - 1);
109 }
110
111 if (reg->flags & IR3_REG_CONST) {
112 info->max_const = MAX2(info->max_const, max >> 2);
113 } else if (val.num == 63) {
114 /* ignore writes to dummy register r63.x */
115 } else if (max < regid(48, 0)) {
116 if (reg->flags & IR3_REG_HALF) {
117 if (v->mergedregs) {
118 /* starting w/ a6xx, half regs conflict with full regs: */
119 info->max_reg = MAX2(info->max_reg, max >> 3);
120 } else {
121 info->max_half_reg = MAX2(info->max_half_reg, max >> 2);
122 }
123 } else {
124 info->max_reg = MAX2(info->max_reg, max >> 2);
125 }
126 }
127 }
128
129 return val.dummy32;
130 }
131
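/* Category 0: flow control (nop/branch/jump/end).  The position of the
 * branch-target immediate changed across generations, hence the
 * a3xx/a4xx/a5xx+ selection on gpu_id below.
 */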
132 static int emit_cat0(struct ir3_instruction *instr, void *ptr,
133 struct ir3_info *info)
134 {
135 struct ir3_shader_variant *v = info->data;
136 instr_cat0_t *cat0 = ptr;
137
138 if (v->shader->compiler->gpu_id >= 500) {
139 cat0->a5xx.immed = instr->cat0.immed;
140 } else if (v->shader->compiler->gpu_id >= 400) {
141 cat0->a4xx.immed = instr->cat0.immed;
142 } else {
143 cat0->a3xx.immed = instr->cat0.immed;
144 }
145 cat0->repeat = instr->repeat;
146 cat0->ss = !!(instr->flags & IR3_INSTR_SS);
147 cat0->inv0 = instr->cat0.inv;
148 cat0->comp0 = instr->cat0.comp;
149 cat0->opc = instr->opc;
150 cat0->opc_hi = instr->opc >= 16;
151 cat0->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
152 cat0->sync = !!(instr->flags & IR3_INSTR_SY);
153 cat0->opc_cat = 0;
154
155 return 0;
156 }
157
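/* Category 1: mov/cov (moves and type conversions).  The single source is
 * either an immediate, a relative (address-register indexed) gpr/const, or
 * a plain gpr/const; src_type/dst_type describe the conversion performed.
 */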
158 static int emit_cat1(struct ir3_instruction *instr, void *ptr,
159 struct ir3_info *info)
160 {
161 struct ir3_register *dst = instr->regs[0];
162 struct ir3_register *src = instr->regs[1];
163 instr_cat1_t *cat1 = ptr;
164
165 iassert(instr->regs_count == 2);
166 iassert_type(dst, type_size(instr->cat1.dst_type) == 32);
167 if (!(src->flags & IR3_REG_IMMED))
168 iassert_type(src, type_size(instr->cat1.src_type) == 32);
169
170 if (src->flags & IR3_REG_IMMED) {
171 cat1->iim_val = src->iim_val;
172 cat1->src_im = 1;
173 } else if (src->flags & IR3_REG_RELATIV) {
174 cat1->off = reg(src, info, instr->repeat,
175 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF | IR3_REG_RELATIV);
176 cat1->src_rel = 1;
177 cat1->src_rel_c = !!(src->flags & IR3_REG_CONST);
178 } else {
179 cat1->src = reg(src, info, instr->repeat,
180 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF);
181 cat1->src_c = !!(src->flags & IR3_REG_CONST);
182 }
183
184 cat1->dst = reg(dst, info, instr->repeat,
185 IR3_REG_RELATIV | IR3_REG_EVEN |
186 IR3_REG_R | IR3_REG_POS_INF | IR3_REG_HALF);
187 cat1->repeat = instr->repeat;
188 cat1->src_r = !!(src->flags & IR3_REG_R);
189 cat1->ss = !!(instr->flags & IR3_INSTR_SS);
190 cat1->ul = !!(instr->flags & IR3_INSTR_UL);
191 cat1->dst_type = instr->cat1.dst_type;
192 cat1->dst_rel = !!(dst->flags & IR3_REG_RELATIV);
193 cat1->src_type = instr->cat1.src_type;
194 cat1->even = !!(dst->flags & IR3_REG_EVEN);
195 cat1->pos_inf = !!(dst->flags & IR3_REG_POS_INF);
196 cat1->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
197 cat1->sync = !!(instr->flags & IR3_INSTR_SY);
198 cat1->opc_cat = 1;
199
200 return 0;
201 }
202
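/* Category 2: ALU instructions with one or two sources.  When no (r)
 * repeat flag is used, the src1_r/src2_r bits are instead used to encode
 * the number of nop cycles folded into the instruction (instr->nop).
 */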
203 static int emit_cat2(struct ir3_instruction *instr, void *ptr,
204 struct ir3_info *info)
205 {
206 struct ir3_register *dst = instr->regs[0];
207 struct ir3_register *src1 = instr->regs[1];
208 struct ir3_register *src2 = instr->regs[2];
209 instr_cat2_t *cat2 = ptr;
210 unsigned absneg = ir3_cat2_absneg(instr->opc);
211
212 iassert((instr->regs_count == 2) || (instr->regs_count == 3));
213
214 if (instr->nop) {
215 iassert(!instr->repeat);
216 iassert(instr->nop <= 3);
217
218 cat2->src1_r = instr->nop & 0x1;
219 cat2->src2_r = (instr->nop >> 1) & 0x1;
220 } else {
221 cat2->src1_r = !!(src1->flags & IR3_REG_R);
222 if (src2)
223 cat2->src2_r = !!(src2->flags & IR3_REG_R);
224 }
225
226 if (src1->flags & IR3_REG_RELATIV) {
227 iassert(src1->array.offset < (1 << 10));
228 cat2->rel1.src1 = reg(src1, info, instr->repeat,
229 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
230 IR3_REG_HALF | absneg);
231 cat2->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
232 cat2->rel1.src1_rel = 1;
233 } else if (src1->flags & IR3_REG_CONST) {
234 iassert(src1->num < (1 << 12));
235 cat2->c1.src1 = reg(src1, info, instr->repeat,
236 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
237 absneg);
238 cat2->c1.src1_c = 1;
239 } else {
240 iassert(src1->num < (1 << 11));
241 cat2->src1 = reg(src1, info, instr->repeat,
242 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
243 absneg);
244 }
245 cat2->src1_im = !!(src1->flags & IR3_REG_IMMED);
246 cat2->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
247 cat2->src1_abs = !!(src1->flags & (IR3_REG_FABS | IR3_REG_SABS));
248
249 if (src2) {
250 iassert((src2->flags & IR3_REG_IMMED) ||
251 !((src1->flags ^ src2->flags) & IR3_REG_HALF));
252
253 if (src2->flags & IR3_REG_RELATIV) {
254 iassert(src2->array.offset < (1 << 10));
255 cat2->rel2.src2 = reg(src2, info, instr->repeat,
256 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
257 IR3_REG_HALF | absneg);
258 cat2->rel2.src2_c = !!(src2->flags & IR3_REG_CONST);
259 cat2->rel2.src2_rel = 1;
260 } else if (src2->flags & IR3_REG_CONST) {
261 iassert(src2->num < (1 << 12));
262 cat2->c2.src2 = reg(src2, info, instr->repeat,
263 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
264 absneg);
265 cat2->c2.src2_c = 1;
266 } else {
267 iassert(src2->num < (1 << 11));
268 cat2->src2 = reg(src2, info, instr->repeat,
269 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
270 absneg);
271 }
272
273 cat2->src2_im = !!(src2->flags & IR3_REG_IMMED);
274 cat2->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
275 cat2->src2_abs = !!(src2->flags & (IR3_REG_FABS | IR3_REG_SABS));
276 }
277
278 cat2->dst = reg(dst, info, instr->repeat,
279 IR3_REG_R | IR3_REG_EI | IR3_REG_HALF);
280 cat2->repeat = instr->repeat;
281 cat2->sat = !!(instr->flags & IR3_INSTR_SAT);
282 cat2->ss = !!(instr->flags & IR3_INSTR_SS);
283 cat2->ul = !!(instr->flags & IR3_INSTR_UL);
284 cat2->dst_half = !!((src1->flags ^ dst->flags) & IR3_REG_HALF);
285 cat2->ei = !!(dst->flags & IR3_REG_EI);
286 cat2->cond = instr->cat2.condition;
287 cat2->full = ! (src1->flags & IR3_REG_HALF);
288 cat2->opc = instr->opc;
289 cat2->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
290 cat2->sync = !!(instr->flags & IR3_INSTR_SY);
291 cat2->opc_cat = 2;
292
293 return 0;
294 }
295
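/* Category 3: three-source ALU instructions (mad/sel/sad).  For the 16-bit
 * variants listed in the switch below, all three sources must be half
 * registers, which is what the IR3_REG_HALF asserts enforce.
 */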
296 static int emit_cat3(struct ir3_instruction *instr, void *ptr,
297 struct ir3_info *info)
298 {
299 struct ir3_register *dst = instr->regs[0];
300 struct ir3_register *src1 = instr->regs[1];
301 struct ir3_register *src2 = instr->regs[2];
302 struct ir3_register *src3 = instr->regs[3];
303 unsigned absneg = ir3_cat3_absneg(instr->opc);
304 instr_cat3_t *cat3 = ptr;
305 uint32_t src_flags = 0;
306
307 switch (instr->opc) {
308 case OPC_MAD_F16:
309 case OPC_MAD_U16:
310 case OPC_MAD_S16:
311 case OPC_SEL_B16:
312 case OPC_SEL_S16:
313 case OPC_SEL_F16:
314 case OPC_SAD_S16:
315 case OPC_SAD_S32: // really??
316 src_flags |= IR3_REG_HALF;
317 break;
318 default:
319 break;
320 }
321
322 iassert(instr->regs_count == 4);
323 iassert(!((src1->flags ^ src_flags) & IR3_REG_HALF));
324 iassert(!((src2->flags ^ src_flags) & IR3_REG_HALF));
325 iassert(!((src3->flags ^ src_flags) & IR3_REG_HALF));
326
327 if (instr->nop) {
328 iassert(!instr->repeat);
329 iassert(instr->nop <= 3);
330
331 cat3->src1_r = instr->nop & 0x1;
332 cat3->src2_r = (instr->nop >> 1) & 0x1;
333 } else {
334 cat3->src1_r = !!(src1->flags & IR3_REG_R);
335 cat3->src2_r = !!(src2->flags & IR3_REG_R);
336 }
337
338 if (src1->flags & IR3_REG_RELATIV) {
339 iassert(src1->array.offset < (1 << 10));
340 cat3->rel1.src1 = reg(src1, info, instr->repeat,
341 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
342 IR3_REG_HALF | absneg);
343 cat3->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
344 cat3->rel1.src1_rel = 1;
345 } else if (src1->flags & IR3_REG_CONST) {
346 iassert(src1->num < (1 << 12));
347 cat3->c1.src1 = reg(src1, info, instr->repeat,
348 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
349 cat3->c1.src1_c = 1;
350 } else {
351 iassert(src1->num < (1 << 11));
352 cat3->src1 = reg(src1, info, instr->repeat,
353 IR3_REG_R | IR3_REG_HALF | absneg);
354 }
355
356 cat3->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
357
358 cat3->src2 = reg(src2, info, instr->repeat,
359 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
360 cat3->src2_c = !!(src2->flags & IR3_REG_CONST);
361 cat3->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
362
363 if (src3->flags & IR3_REG_RELATIV) {
364 iassert(src3->array.offset < (1 << 10));
365 cat3->rel2.src3 = reg(src3, info, instr->repeat,
366 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
367 IR3_REG_HALF | absneg);
368 cat3->rel2.src3_c = !!(src3->flags & IR3_REG_CONST);
369 cat3->rel2.src3_rel = 1;
370 } else if (src3->flags & IR3_REG_CONST) {
371 iassert(src3->num < (1 << 12));
372 cat3->c2.src3 = reg(src3, info, instr->repeat,
373 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
374 cat3->c2.src3_c = 1;
375 } else {
376 iassert(src3->num < (1 << 11));
377 cat3->src3 = reg(src3, info, instr->repeat,
378 IR3_REG_R | IR3_REG_HALF | absneg);
379 }
380
381 cat3->src3_neg = !!(src3->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
382 cat3->src3_r = !!(src3->flags & IR3_REG_R);
383
384 cat3->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
385 cat3->repeat = instr->repeat;
386 cat3->sat = !!(instr->flags & IR3_INSTR_SAT);
387 cat3->ss = !!(instr->flags & IR3_INSTR_SS);
388 cat3->ul = !!(instr->flags & IR3_INSTR_UL);
389 cat3->dst_half = !!((src_flags ^ dst->flags) & IR3_REG_HALF);
390 cat3->opc = instr->opc;
391 cat3->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
392 cat3->sync = !!(instr->flags & IR3_INSTR_SY);
393 cat3->opc_cat = 3;
394
395 return 0;
396 }
397
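/* Category 4: single-source SFU instructions (rcp, rsq, sin, cos, etc). */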
398 static int emit_cat4(struct ir3_instruction *instr, void *ptr,
399 struct ir3_info *info)
400 {
401 struct ir3_register *dst = instr->regs[0];
402 struct ir3_register *src = instr->regs[1];
403 instr_cat4_t *cat4 = ptr;
404
405 iassert(instr->regs_count == 2);
406
407 if (src->flags & IR3_REG_RELATIV) {
408 iassert(src->array.offset < (1 << 10));
409 cat4->rel.src = reg(src, info, instr->repeat,
410 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_FNEG |
411 IR3_REG_FABS | IR3_REG_R | IR3_REG_HALF);
412 cat4->rel.src_c = !!(src->flags & IR3_REG_CONST);
413 cat4->rel.src_rel = 1;
414 } else if (src->flags & IR3_REG_CONST) {
415 iassert(src->num < (1 << 12));
416 cat4->c.src = reg(src, info, instr->repeat,
417 IR3_REG_CONST | IR3_REG_FNEG | IR3_REG_FABS |
418 IR3_REG_R | IR3_REG_HALF);
419 cat4->c.src_c = 1;
420 } else {
421 iassert(src->num < (1 << 11));
422 cat4->src = reg(src, info, instr->repeat,
423 IR3_REG_IMMED | IR3_REG_FNEG | IR3_REG_FABS |
424 IR3_REG_R | IR3_REG_HALF);
425 }
426
427 cat4->src_im = !!(src->flags & IR3_REG_IMMED);
428 cat4->src_neg = !!(src->flags & IR3_REG_FNEG);
429 cat4->src_abs = !!(src->flags & IR3_REG_FABS);
430 cat4->src_r = !!(src->flags & IR3_REG_R);
431
432 cat4->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
433 cat4->repeat = instr->repeat;
434 cat4->sat = !!(instr->flags & IR3_INSTR_SAT);
435 cat4->ss = !!(instr->flags & IR3_INSTR_SS);
436 cat4->ul = !!(instr->flags & IR3_INSTR_UL);
437 cat4->dst_half = !!((src->flags ^ dst->flags) & IR3_REG_HALF);
438 cat4->full = ! (src->flags & IR3_REG_HALF);
439 cat4->opc = instr->opc;
440 cat4->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
441 cat4->sync = !!(instr->flags & IR3_INSTR_SY);
442 cat4->opc_cat = 4;
443
444 return 0;
445 }
446
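/* Category 5: texture sample/query instructions.  The sampler/texture pair
 * can be given as immediates (the "normal" case), indirectly via a register
 * (S2EN), and/or via the bindless descriptor modes (IR3_INSTR_B); the
 * desc_mode selection below picks the matching encoding.
 */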
447 static int emit_cat5(struct ir3_instruction *instr, void *ptr,
448 struct ir3_info *info)
449 {
450 struct ir3_register *dst = instr->regs[0];
451 /* To simplify things when there could be zero, one, or two args other
452 * than tex/sampler idx, we use the first src reg in the ir to hold
453 * samp_tex hvec2:
454 */
455 struct ir3_register *src1;
456 struct ir3_register *src2;
457 instr_cat5_t *cat5 = ptr;
458
459 iassert((instr->regs_count == 1) ||
460 (instr->regs_count == 2) ||
461 (instr->regs_count == 3) ||
462 (instr->regs_count == 4));
463
464 if (instr->flags & IR3_INSTR_S2EN) {
465 src1 = instr->regs[2];
466 src2 = instr->regs_count > 3 ? instr->regs[3] : NULL;
467 } else {
468 src1 = instr->regs_count > 1 ? instr->regs[1] : NULL;
469 src2 = instr->regs_count > 2 ? instr->regs[2] : NULL;
470 }
471
472 assume(src1 || !src2);
473
474 if (src1) {
475 cat5->full = ! (src1->flags & IR3_REG_HALF);
476 cat5->src1 = reg(src1, info, instr->repeat, IR3_REG_HALF);
477 }
478
479 if (src2) {
480 iassert(!((src1->flags ^ src2->flags) & IR3_REG_HALF));
481 cat5->src2 = reg(src2, info, instr->repeat, IR3_REG_HALF);
482 }
483
484 if (instr->flags & IR3_INSTR_B) {
485 cat5->s2en_bindless.base_hi = instr->cat5.tex_base >> 1;
486 cat5->base_lo = instr->cat5.tex_base & 1;
487 }
488
489 if (instr->flags & IR3_INSTR_S2EN) {
490 struct ir3_register *samp_tex = instr->regs[1];
491 iassert(samp_tex->flags & IR3_REG_HALF);
492 cat5->s2en_bindless.src3 = reg(samp_tex, info, instr->repeat,
493 (instr->flags & IR3_INSTR_B) ? 0 : IR3_REG_HALF);
494 if (instr->flags & IR3_INSTR_B) {
495 if (instr->flags & IR3_INSTR_A1EN) {
496 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_UNIFORM;
497 } else {
498 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_UNIFORM;
499 }
500 } else {
501 /* TODO: This should probably be CAT5_UNIFORM, at least on a6xx,
502 * as this is what the blob does and it is presumably faster, but
503 * first we should confirm it is actually nonuniform and figure
504 * out when the whole descriptor mode mechanism was introduced.
505 */
506 cat5->s2en_bindless.desc_mode = CAT5_NONUNIFORM;
507 }
508 iassert(!(instr->cat5.samp | instr->cat5.tex));
509 } else if (instr->flags & IR3_INSTR_B) {
510 cat5->s2en_bindless.src3 = instr->cat5.samp;
511 if (instr->flags & IR3_INSTR_A1EN) {
512 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_IMM;
513 } else {
514 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_IMM;
515 }
516 } else {
517 cat5->norm.samp = instr->cat5.samp;
518 cat5->norm.tex = instr->cat5.tex;
519 }
520
521 cat5->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
522 cat5->wrmask = dst->wrmask;
523 cat5->type = instr->cat5.type;
524 cat5->is_3d = !!(instr->flags & IR3_INSTR_3D);
525 cat5->is_a = !!(instr->flags & IR3_INSTR_A);
526 cat5->is_s = !!(instr->flags & IR3_INSTR_S);
527 cat5->is_s2en_bindless = !!(instr->flags & (IR3_INSTR_S2EN | IR3_INSTR_B));
528 cat5->is_o = !!(instr->flags & IR3_INSTR_O);
529 cat5->is_p = !!(instr->flags & IR3_INSTR_P);
530 cat5->opc = instr->opc;
531 cat5->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
532 cat5->sync = !!(instr->flags & IR3_INSTR_SY);
533 cat5->opc_cat = 5;
534
535 return 0;
536 }
537
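/* a6xx+ cat6 encoding used for SSBO/IBO access (ldib, stib, ldc, resinfo
 * and the global atomics).  Source slots that an opcode does not use are
 * filled with the destination register (see the switch below), and the
 * pad fields are fixed bit patterns per opcode.
 */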
538 static int emit_cat6_a6xx(struct ir3_instruction *instr, void *ptr,
539 struct ir3_info *info)
540 {
541 struct ir3_register *ssbo;
542 instr_cat6_a6xx_t *cat6 = ptr;
543
544 ssbo = instr->regs[1];
545
546 cat6->type = instr->cat6.type;
547 cat6->d = instr->cat6.d - (instr->opc == OPC_LDC ? 0 : 1);
548 cat6->typed = instr->cat6.typed;
549 cat6->type_size = instr->cat6.iim_val - 1;
550 cat6->opc = instr->opc;
551 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
552 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
553 cat6->opc_cat = 6;
554
555 cat6->ssbo = reg(ssbo, info, instr->repeat, IR3_REG_IMMED);
556
557 /* For unused sources in an opcode, initialize contents with the ir3 dest
558 * reg
559 */
560 switch (instr->opc) {
561 case OPC_RESINFO:
562 cat6->src1 = reg(instr->regs[0], info, instr->repeat, 0);
563 cat6->src2 = reg(instr->regs[0], info, instr->repeat, 0);
564 break;
565 case OPC_LDC:
566 case OPC_LDIB:
567 cat6->src1 = reg(instr->regs[2], info, instr->repeat, 0);
568 cat6->src2 = reg(instr->regs[0], info, instr->repeat, 0);
569 break;
570 default:
571 cat6->src1 = reg(instr->regs[2], info, instr->repeat, 0);
572 cat6->src2 = reg(instr->regs[3], info, instr->repeat, 0);
573 break;
574 }
575
576 if (instr->flags & IR3_INSTR_B) {
577 if (ssbo->flags & IR3_REG_IMMED) {
578 cat6->desc_mode = CAT6_BINDLESS_IMM;
579 } else {
580 cat6->desc_mode = CAT6_BINDLESS_UNIFORM;
581 }
582 cat6->base = instr->cat6.base;
583 } else {
584 if (ssbo->flags & IR3_REG_IMMED)
585 cat6->desc_mode = CAT6_IMM;
586 else
587 cat6->desc_mode = CAT6_UNIFORM;
588 }
589
590 switch (instr->opc) {
591 case OPC_ATOMIC_ADD:
592 case OPC_ATOMIC_SUB:
593 case OPC_ATOMIC_XCHG:
594 case OPC_ATOMIC_INC:
595 case OPC_ATOMIC_DEC:
596 case OPC_ATOMIC_CMPXCHG:
597 case OPC_ATOMIC_MIN:
598 case OPC_ATOMIC_MAX:
599 case OPC_ATOMIC_AND:
600 case OPC_ATOMIC_OR:
601 case OPC_ATOMIC_XOR:
602 cat6->pad1 = 0x1;
603 cat6->pad3 = 0xc;
604 cat6->pad5 = 0x3;
605 break;
606 case OPC_STIB:
607 cat6->pad1 = 0x0;
608 cat6->pad3 = 0xc;
609 cat6->pad5 = 0x2;
610 break;
611 case OPC_LDIB:
612 case OPC_RESINFO:
613 cat6->pad1 = 0x1;
614 cat6->pad3 = 0xc;
615 cat6->pad5 = 0x2;
616 break;
617 case OPC_LDC:
618 cat6->pad1 = 0x0;
619 cat6->pad3 = 0x8;
620 cat6->pad5 = 0x2;
621 break;
622 default:
623 iassert(0);
624 }
625 cat6->pad2 = 0x0;
626 cat6->pad4 = 0x0;
627
628 return 0;
629 }
630
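/* Category 6: load/store and atomic instructions.  Depending on whether a
 * src and/or dst offset is present, one of several overlapping encodings
 * (cat6a/cat6b/cat6c/cat6d plus the ldgb/stgb forms) is used; on a6xx+
 * some opcodes are instead routed to the new encoding above.
 */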
631 static int emit_cat6(struct ir3_instruction *instr, void *ptr,
632 struct ir3_info *info)
633 {
634 struct ir3_shader_variant *v = info->data;
635 struct ir3_register *dst, *src1, *src2;
636 instr_cat6_t *cat6 = ptr;
637
638 /* In a6xx we start using a new instruction encoding for some of
639 * these instructions:
640 */
641 if (v->shader->compiler->gpu_id >= 600) {
642 switch (instr->opc) {
643 case OPC_ATOMIC_ADD:
644 case OPC_ATOMIC_SUB:
645 case OPC_ATOMIC_XCHG:
646 case OPC_ATOMIC_INC:
647 case OPC_ATOMIC_DEC:
648 case OPC_ATOMIC_CMPXCHG:
649 case OPC_ATOMIC_MIN:
650 case OPC_ATOMIC_MAX:
651 case OPC_ATOMIC_AND:
652 case OPC_ATOMIC_OR:
653 case OPC_ATOMIC_XOR:
654 /* The shared variants of these still use the old encoding: */
655 if (!(instr->flags & IR3_INSTR_G))
656 break;
657 /* fallthrough */
658 case OPC_STIB:
659 case OPC_LDIB:
660 case OPC_LDC:
661 case OPC_RESINFO:
662 return emit_cat6_a6xx(instr, ptr, info);
663 default:
664 break;
665 }
666 }
667
668 bool type_full = type_size(instr->cat6.type) == 32;
669
670 cat6->type = instr->cat6.type;
671 cat6->opc = instr->opc;
672 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
673 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
674 cat6->g = !!(instr->flags & IR3_INSTR_G);
675 cat6->opc_cat = 6;
676
677 switch (instr->opc) {
678 case OPC_RESINFO:
679 case OPC_RESFMT:
680 iassert_type(instr->regs[0], type_full); /* dst */
681 iassert_type(instr->regs[1], type_full); /* src1 */
682 break;
683 case OPC_L2G:
684 case OPC_G2L:
685 iassert_type(instr->regs[0], true); /* dst */
686 iassert_type(instr->regs[1], true); /* src1 */
687 break;
688 case OPC_STG:
689 case OPC_STL:
690 case OPC_STP:
691 case OPC_STLW:
692 case OPC_STIB:
693 /* no dst, so regs[0] is dummy */
694 iassert_type(instr->regs[1], true); /* dst */
695 iassert_type(instr->regs[2], type_full); /* src1 */
696 iassert_type(instr->regs[3], true); /* src2 */
697 break;
698 default:
699 iassert_type(instr->regs[0], type_full); /* dst */
700 iassert_type(instr->regs[1], true); /* src1 */
701 if (instr->regs_count > 2)
702 iassert_type(instr->regs[2], true); /* src2 */
703 break;
704 }
705
706 /* the "dst" for a store instruction is (from the perspective
707 * of data flow in the shader, ie. register use/def, etc) in
708 * fact a register that is read by the instruction, rather
709 * than written:
710 */
711 if (is_store(instr)) {
712 iassert(instr->regs_count >= 3);
713
714 dst = instr->regs[1];
715 src1 = instr->regs[2];
716 src2 = (instr->regs_count >= 4) ? instr->regs[3] : NULL;
717 } else {
718 iassert(instr->regs_count >= 2);
719
720 dst = instr->regs[0];
721 src1 = instr->regs[1];
722 src2 = (instr->regs_count >= 3) ? instr->regs[2] : NULL;
723 }
724
725 /* TODO we need a more comprehensive list of which instructions
726 * can be encoded which way. Or possibly use IR3_INSTR_0 flag to
727 * indicate to use the src_off encoding even if offset is zero
728 * (but then what to do about dst_off?)
729 */
730 if (is_atomic(instr->opc)) {
731 instr_cat6ldgb_t *ldgb = ptr;
732
733 /* maybe these two bits both determine the instruction encoding? */
734 cat6->src_off = false;
735
736 ldgb->d = instr->cat6.d - 1;
737 ldgb->typed = instr->cat6.typed;
738 ldgb->type_size = instr->cat6.iim_val - 1;
739
740 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
741
742 if (ldgb->g) {
743 struct ir3_register *src3 = instr->regs[3];
744 struct ir3_register *src4 = instr->regs[4];
745
746 /* first src is src_ssbo: */
747 iassert(src1->flags & IR3_REG_IMMED);
748 ldgb->src_ssbo = src1->uim_val;
749 ldgb->src_ssbo_im = 0x1;
750
751 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
752 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
753 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
754 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
755
756 ldgb->src3 = reg(src4, info, instr->repeat, 0);
757 ldgb->pad0 = 0x1;
758 } else {
759 ldgb->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
760 ldgb->src1_im = !!(src1->flags & IR3_REG_IMMED);
761 ldgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
762 ldgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
763 ldgb->pad0 = 0x1;
764 ldgb->src_ssbo_im = 0x0;
765 }
766
767 return 0;
768 } else if (instr->opc == OPC_LDGB) {
769 struct ir3_register *src3 = instr->regs[3];
770 instr_cat6ldgb_t *ldgb = ptr;
771
772 /* maybe these two bits both determine the instruction encoding? */
773 cat6->src_off = false;
774
775 ldgb->d = instr->cat6.d - 1;
776 ldgb->typed = instr->cat6.typed;
777 ldgb->type_size = instr->cat6.iim_val - 1;
778
779 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
780
781 /* first src is src_ssbo: */
782 iassert(src1->flags & IR3_REG_IMMED);
783 ldgb->src_ssbo = src1->uim_val;
784
785 /* then next two are src1/src2: */
786 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
787 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
788 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
789 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
790
791 ldgb->pad0 = 0x0;
792 ldgb->src_ssbo_im = true;
793
794 return 0;
795 } else if (instr->opc == OPC_RESINFO) {
796 instr_cat6ldgb_t *ldgb = ptr;
797
798 ldgb->d = instr->cat6.d - 1;
799
800 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
801
802 /* first src is src_ssbo: */
803 ldgb->src_ssbo = reg(src1, info, instr->repeat, IR3_REG_IMMED);
804 ldgb->src_ssbo_im = !!(src1->flags & IR3_REG_IMMED);
805
806 return 0;
807 } else if ((instr->opc == OPC_STGB) || (instr->opc == OPC_STIB)) {
808 struct ir3_register *src3 = instr->regs[4];
809 instr_cat6stgb_t *stgb = ptr;
810
811 /* maybe these two bits both determine the instruction encoding? */
812 cat6->src_off = true;
813 stgb->pad3 = 0x2;
814
815 stgb->d = instr->cat6.d - 1;
816 stgb->typed = instr->cat6.typed;
817 stgb->type_size = instr->cat6.iim_val - 1;
818
819 /* first src is dst_ssbo: */
820 iassert(dst->flags & IR3_REG_IMMED);
821 stgb->dst_ssbo = dst->uim_val;
822
823 /* then src1/src2/src3: */
824 stgb->src1 = reg(src1, info, instr->repeat, 0);
825 stgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
826 stgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
827 stgb->src3 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
828 stgb->src3_im = !!(src3->flags & IR3_REG_IMMED);
829
830 return 0;
831 } else if (instr->cat6.src_offset || (instr->opc == OPC_LDG) ||
832 (instr->opc == OPC_LDL) || (instr->opc == OPC_LDLW)) {
833 struct ir3_register *src3 = instr->regs[3];
834 instr_cat6a_t *cat6a = ptr;
835
836 cat6->src_off = true;
837
838 if (instr->opc == OPC_LDG) {
839 /* For LDG src1 cannot be immediate, so the src1_im bit is redundant
840 * and is instead used to signal whether 'off' is a 32 bit register
841 * (when set) or an immediate offset.
842 */
843 cat6a->src1 = reg(src1, info, instr->repeat, 0);
844 cat6a->src1_im = !(src3->flags & IR3_REG_IMMED);
845 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
846 } else {
847 cat6a->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
848 cat6a->src1_im = !!(src1->flags & IR3_REG_IMMED);
849 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
850 iassert(src3->flags & IR3_REG_IMMED);
851 }
852
853 /* Num components */
854 cat6a->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
855 cat6a->src2_im = true;
856 } else {
857 instr_cat6b_t *cat6b = ptr;
858
859 cat6->src_off = false;
860
861 cat6b->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED | IR3_REG_HALF);
862 cat6b->src1_im = !!(src1->flags & IR3_REG_IMMED);
863 if (src2) {
864 cat6b->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
865 cat6b->src2_im = !!(src2->flags & IR3_REG_IMMED);
866 }
867 }
868
869 if (instr->cat6.dst_offset || (instr->opc == OPC_STG) ||
870 (instr->opc == OPC_STL) || (instr->opc == OPC_STLW)) {
871 instr_cat6c_t *cat6c = ptr;
872 cat6->dst_off = true;
873 cat6c->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
874
875 if (instr->flags & IR3_INSTR_G) {
876 struct ir3_register *src3 = instr->regs[4];
877 cat6c->off = reg(src3, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
878 if (src3->flags & IR3_REG_IMMED) {
879 /* Immediate offsets are in bytes... */
880 cat6->g = false;
881 cat6c->off *= 4;
882 }
883 } else {
884 cat6c->off = instr->cat6.dst_offset;
885 cat6c->off_high = instr->cat6.dst_offset >> 8;
886 }
887 } else {
888 instr_cat6d_t *cat6d = ptr;
889 cat6->dst_off = false;
890 cat6d->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
891 }
892
893 return 0;
894 }
895
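/* Category 7: barrier/fence instructions; the w/r/l/g bits are copied
 * directly from the ir3 cat7 fields.
 */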
896 static int emit_cat7(struct ir3_instruction *instr, void *ptr,
897 struct ir3_info *info)
898 {
899 instr_cat7_t *cat7 = ptr;
900
901 cat7->ss = !!(instr->flags & IR3_INSTR_SS);
902 cat7->w = instr->cat7.w;
903 cat7->r = instr->cat7.r;
904 cat7->l = instr->cat7.l;
905 cat7->g = instr->cat7.g;
906 cat7->opc = instr->opc;
907 cat7->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
908 cat7->sync = !!(instr->flags & IR3_INSTR_SY);
909 cat7->opc_cat = 7;
910
911 return 0;
912 }
913
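/* Emit handlers indexed by instruction category (opc_cat(opc)): */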
914 static int (*emit[])(struct ir3_instruction *instr, void *ptr,
915 struct ir3_info *info) = {
916 emit_cat0, emit_cat1, emit_cat2, emit_cat3, emit_cat4, emit_cat5, emit_cat6,
917 emit_cat7,
918 };
919
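/* Assemble the shader into a newly allocated buffer of machine code.  Each
 * ir3 instruction encodes to 64 bits (2 dwords) and the result is padded
 * to a whole instruction group.  Along the way, statistics (instruction/
 * nop/mov/cov counts, estimated (ss) stalls, position of the last bary.f)
 * are collected into v->info.  Returns NULL if any instruction fails to
 * encode.
 */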
920 void * ir3_assemble(struct ir3_shader_variant *v)
921 {
922 uint32_t *ptr, *dwords;
923 struct ir3_info *info = &v->info;
924 struct ir3 *shader = v->ir;
925
926 memset(info, 0, sizeof(*info));
927 info->data = v;
928 info->max_reg = -1;
929 info->max_half_reg = -1;
930 info->max_const = -1;
931
932 foreach_block (block, &shader->block_list) {
933 foreach_instr (instr, &block->instr_list) {
934 info->sizedwords += 2;
935 }
936 }
937
938 /* need an integer number of instruction "groups" (sets of 16
939 * instructions on a4xx and later, or sets of 4 instructions on a3xx),
940 * so pad out w/ NOPs if needed: (NOTE each instruction is 64bits)
941 */
942 if (v->shader->compiler->gpu_id >= 400) {
943 info->sizedwords = align(info->sizedwords, 16 * 2);
944 } else {
945 info->sizedwords = align(info->sizedwords, 4 * 2);
946 }
947
948 ptr = dwords = rzalloc_size(v, 4 * info->sizedwords);
949
950 foreach_block (block, &shader->block_list) {
951 unsigned sfu_delay = 0;
952
953 foreach_instr (instr, &block->instr_list) {
954 int ret = emit[opc_cat(instr->opc)](instr, dwords, info);
955 if (ret)
956 goto fail;
957
958 if ((instr->opc == OPC_BARY_F) && (instr->regs[0]->flags & IR3_REG_EI))
959 info->last_baryf = info->instrs_count;
960
961 info->instrs_count += 1 + instr->repeat + instr->nop;
962 info->nops_count += instr->nop;
963 if (instr->opc == OPC_NOP)
964 info->nops_count += 1 + instr->repeat;
965 if (instr->opc == OPC_MOV) {
966 if (instr->cat1.src_type == instr->cat1.dst_type) {
967 info->mov_count += 1 + instr->repeat;
968 } else {
969 info->cov_count += 1 + instr->repeat;
970 }
971 }
972 dwords += 2;
973
974 if (instr->flags & IR3_INSTR_SS) {
975 info->ss++;
976 info->sstall += sfu_delay;
977 }
978
979 if (instr->flags & IR3_INSTR_SY)
980 info->sy++;
981
982 if (is_sfu(instr)) {
983 sfu_delay = 10;
984 } else if (sfu_delay > 0) {
985 sfu_delay--;
986 }
987 }
988 }
989
990 return ptr;
991
992 fail:
993 ralloc_free(ptr);
994 return NULL;
995 }
996
997 static struct ir3_register * reg_create(struct ir3 *shader,
998 int num, int flags)
999 {
1000 struct ir3_register *reg =
1001 ir3_alloc(shader, sizeof(struct ir3_register));
1002 reg->wrmask = 1;
1003 reg->flags = flags;
1004 reg->num = num;
1005 return reg;
1006 }
1007
1008 static void insert_instr(struct ir3_block *block,
1009 struct ir3_instruction *instr)
1010 {
1011 struct ir3 *shader = block->shader;
1012 #ifdef DEBUG
1013 instr->serialno = ++shader->instr_count;
1014 #endif
1015 list_addtail(&instr->node, &block->instr_list);
1016
1017 if (is_input(instr))
1018 array_insert(shader, shader->baryfs, instr);
1019 }
1020
1021 struct ir3_block * ir3_block_create(struct ir3 *shader)
1022 {
1023 struct ir3_block *block = ir3_alloc(shader, sizeof(*block));
1024 #ifdef DEBUG
1025 block->serialno = ++shader->block_count;
1026 #endif
1027 block->shader = shader;
1028 list_inithead(&block->node);
1029 list_inithead(&block->instr_list);
1030 block->predecessors = _mesa_pointer_set_create(block);
1031 return block;
1032 }
1033
1034 static struct ir3_instruction *instr_create(struct ir3_block *block, int nreg)
1035 {
1036 struct ir3_instruction *instr;
1037 unsigned sz = sizeof(*instr) + (nreg * sizeof(instr->regs[0]));
1038 char *ptr = ir3_alloc(block->shader, sz);
1039
1040 instr = (struct ir3_instruction *)ptr;
1041 ptr += sizeof(*instr);
1042 instr->regs = (struct ir3_register **)ptr;
1043
1044 #ifdef DEBUG
1045 instr->regs_max = nreg;
1046 #endif
1047
1048 return instr;
1049 }
1050
1051 struct ir3_instruction * ir3_instr_create2(struct ir3_block *block,
1052 opc_t opc, int nreg)
1053 {
1054 struct ir3_instruction *instr = instr_create(block, nreg);
1055 instr->block = block;
1056 instr->opc = opc;
1057 insert_instr(block, instr);
1058 return instr;
1059 }
1060
1061 struct ir3_instruction * ir3_instr_create(struct ir3_block *block, opc_t opc)
1062 {
1063 /* NOTE: we could be slightly more clever, at least for non-meta,
1064 * and choose # of regs based on category.
1065 */
1066 return ir3_instr_create2(block, opc, 4);
1067 }
1068
1069 struct ir3_instruction * ir3_instr_clone(struct ir3_instruction *instr)
1070 {
1071 struct ir3_instruction *new_instr = instr_create(instr->block,
1072 instr->regs_count);
1073 struct ir3_register **regs;
1074 unsigned i;
1075
1076 regs = new_instr->regs;
1077 *new_instr = *instr;
1078 new_instr->regs = regs;
1079
1080 insert_instr(instr->block, new_instr);
1081
1082 /* clone registers: */
1083 new_instr->regs_count = 0;
1084 for (i = 0; i < instr->regs_count; i++) {
1085 struct ir3_register *reg = instr->regs[i];
1086 struct ir3_register *new_reg =
1087 ir3_reg_create(new_instr, reg->num, reg->flags);
1088 *new_reg = *reg;
1089 }
1090
1091 return new_instr;
1092 }
1093
1094 /* Add a false dependency to instruction, to ensure it is scheduled first: */
1095 void ir3_instr_add_dep(struct ir3_instruction *instr, struct ir3_instruction *dep)
1096 {
1097 array_insert(instr, instr->deps, dep);
1098 }
1099
1100 struct ir3_register * ir3_reg_create(struct ir3_instruction *instr,
1101 int num, int flags)
1102 {
1103 struct ir3 *shader = instr->block->shader;
1104 struct ir3_register *reg = reg_create(shader, num, flags);
1105 #ifdef DEBUG
1106 debug_assert(instr->regs_count < instr->regs_max);
1107 #endif
1108 instr->regs[instr->regs_count++] = reg;
1109 return reg;
1110 }
1111
1112 struct ir3_register * ir3_reg_clone(struct ir3 *shader,
1113 struct ir3_register *reg)
1114 {
1115 struct ir3_register *new_reg = reg_create(shader, 0, 0);
1116 *new_reg = *reg;
1117 return new_reg;
1118 }
1119
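/* Associate 'instr' with the instruction 'addr' that writes its address
 * register (a0.x or a1.x), and add it to the shader's a0_users/a1_users
 * list for that component.
 */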
1120 void
1121 ir3_instr_set_address(struct ir3_instruction *instr,
1122 struct ir3_instruction *addr)
1123 {
1124 if (instr->address != addr) {
1125 struct ir3 *ir = instr->block->shader;
1126
1127 debug_assert(!instr->address);
1128 debug_assert(instr->block == addr->block);
1129
1130 instr->address = addr;
1131 debug_assert(reg_num(addr->regs[0]) == REG_A0);
1132 unsigned comp = reg_comp(addr->regs[0]);
1133 if (comp == 0) {
1134 array_insert(ir, ir->a0_users, instr);
1135 } else {
1136 debug_assert(comp == 1);
1137 array_insert(ir, ir->a1_users, instr);
1138 }
1139 }
1140 }
1141
1142 void
1143 ir3_block_clear_mark(struct ir3_block *block)
1144 {
1145 foreach_instr (instr, &block->instr_list)
1146 instr->flags &= ~IR3_INSTR_MARK;
1147 }
1148
1149 void
1150 ir3_clear_mark(struct ir3 *ir)
1151 {
1152 foreach_block (block, &ir->block_list) {
1153 ir3_block_clear_mark(block);
1154 }
1155 }
1156
1157 unsigned
1158 ir3_count_instructions(struct ir3 *ir)
1159 {
1160 unsigned cnt = 1;
1161 foreach_block (block, &ir->block_list) {
1162 block->start_ip = cnt;
1163 foreach_instr (instr, &block->instr_list) {
1164 instr->ip = cnt++;
1165 }
1166 block->end_ip = cnt;
1167 }
1168 return cnt;
1169 }
1170
1171 /* When counting instructions for RA, we insert extra fake instructions at the
1172 * beginning of each block, where values become live, and at the end where
1173 * values die. This prevents problems where values live-in at the beginning or
1174 * live-out at the end of a block from being treated as if they were
1175 * live-in/live-out at the first/last instruction, which would be incorrect.
1176 * In ir3_legalize these ip's are assumed to be actual ip's of the final
1177 * program, so it would be incorrect to use this everywhere.
1178 */
1179
1180 unsigned
1181 ir3_count_instructions_ra(struct ir3 *ir)
1182 {
1183 unsigned cnt = 1;
1184 foreach_block (block, &ir->block_list) {
1185 block->start_ip = cnt++;
1186 foreach_instr (instr, &block->instr_list) {
1187 instr->ip = cnt++;
1188 }
1189 block->end_ip = cnt++;
1190 }
1191 return cnt;
1192 }
1193
1194 struct ir3_array *
1195 ir3_lookup_array(struct ir3 *ir, unsigned id)
1196 {
1197 foreach_array (arr, &ir->array_list)
1198 if (arr->id == id)
1199 return arr;
1200 return NULL;
1201 }
1202
1203 void
1204 ir3_find_ssa_uses(struct ir3 *ir, void *mem_ctx, bool falsedeps)
1205 {
1206 /* We could do this in a single pass if we could assume instructions
1207 * are always sorted, which currently is not always true (in
1208 * particular after the ir3_group pass, but maybe other places).
1209 */
1210 foreach_block (block, &ir->block_list)
1211 foreach_instr (instr, &block->instr_list)
1212 instr->uses = NULL;
1213
1214 foreach_block (block, &ir->block_list) {
1215 foreach_instr (instr, &block->instr_list) {
1216 foreach_ssa_src_n (src, n, instr) {
1217 if (__is_false_dep(instr, n) && !falsedeps)
1218 continue;
1219 if (!src->uses)
1220 src->uses = _mesa_pointer_set_create(mem_ctx);
1221 _mesa_set_add(src->uses, instr);
1222 }
1223 }
1224 }
1225 }
1226
1227 /**
1228 * Set the destination type of an instruction, for example if a
1229 * conversion is folded in, handling the special cases where the
1230 * instruction's dest type or opcode needs to be fixed up.
1231 */
1232 void
1233 ir3_set_dst_type(struct ir3_instruction *instr, bool half)
1234 {
1235 if (half) {
1236 instr->regs[0]->flags |= IR3_REG_HALF;
1237 } else {
1238 instr->regs[0]->flags &= ~IR3_REG_HALF;
1239 }
1240
1241 switch (opc_cat(instr->opc)) {
1242 case 1: /* move instructions */
1243 if (half) {
1244 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
1245 } else {
1246 instr->cat1.dst_type = full_type(instr->cat1.dst_type);
1247 }
1248 break;
1249 case 4:
1250 if (half) {
1251 instr->opc = cat4_half_opc(instr->opc);
1252 } else {
1253 instr->opc = cat4_full_opc(instr->opc);
1254 }
1255 break;
1256 case 5:
1257 if (half) {
1258 instr->cat5.type = half_type(instr->cat5.type);
1259 } else {
1260 instr->cat5.type = full_type(instr->cat5.type);
1261 }
1262 break;
1263 }
1264 }
1265
1266 /**
1267 * One-time fixup for instruction src-types. Other than cov's that
1268 * are folded, an instruction's src type does not change.
1269 */
1270 void
1271 ir3_fixup_src_type(struct ir3_instruction *instr)
1272 {
1273 bool half = !!(instr->regs[1]->flags & IR3_REG_HALF);
1274
1275 switch (opc_cat(instr->opc)) {
1276 case 1: /* move instructions */
1277 if (half) {
1278 instr->cat1.src_type = half_type(instr->cat1.src_type);
1279 } else {
1280 instr->cat1.src_type = full_type(instr->cat1.src_type);
1281 }
1282 break;
1283 case 3:
1284 if (half) {
1285 instr->opc = cat3_half_opc(instr->opc);
1286 } else {
1287 instr->opc = cat3_full_opc(instr->opc);
1288 }
1289 break;
1290 }
1291 }
1292
1293 static unsigned
1294 cp_flags(unsigned flags)
1295 {
1296 /* only considering these flags (at least for now): */
1297 flags &= (IR3_REG_CONST | IR3_REG_IMMED |
1298 IR3_REG_FNEG | IR3_REG_FABS |
1299 IR3_REG_SNEG | IR3_REG_SABS |
1300 IR3_REG_BNOT | IR3_REG_RELATIV);
1301 return flags;
1302 }
1303
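/* Check whether a src with the given flags would be valid as the n'th src
 * of 'instr', according to the per-category restrictions below (which srcs
 * may be const/immed/relative, allowed absneg modifiers, etc).  This is
 * what lets passes like copy propagation decide whether a value can be
 * folded into a consumer.
 */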
1304 bool
1305 ir3_valid_flags(struct ir3_instruction *instr, unsigned n,
1306 unsigned flags)
1307 {
1308 struct ir3_compiler *compiler = instr->block->shader->compiler;
1309 unsigned valid_flags;
1310
1311 if ((flags & IR3_REG_HIGH) &&
1312 (opc_cat(instr->opc) > 1) &&
1313 (compiler->gpu_id >= 600))
1314 return false;
1315
1316 flags = cp_flags(flags);
1317
1318 /* If destination is indirect, then source cannot be.. at least
1319 * I don't think so..
1320 */
1321 if ((instr->regs[0]->flags & IR3_REG_RELATIV) &&
1322 (flags & IR3_REG_RELATIV))
1323 return false;
1324
1325 if (flags & IR3_REG_RELATIV) {
1326 /* TODO need to test on earlier gens.. pretty sure the earlier
1327 * problem was just that we didn't check that the src was from
1328 * same block (since we can't propagate address register values
1329 * across blocks currently)
1330 */
1331 if (compiler->gpu_id < 600)
1332 return false;
1333
1334 /* NOTE in the special try_swap_mad_two_srcs() case we can be
1335 * called on a src that has already had an indirect load folded
1336 * in, in which case ssa() returns NULL
1337 */
1338 if (instr->regs[n+1]->flags & IR3_REG_SSA) {
1339 struct ir3_instruction *src = ssa(instr->regs[n+1]);
1340 if (src->address->block != instr->block)
1341 return false;
1342 }
1343 }
1344
1345 switch (opc_cat(instr->opc)) {
1346 case 1:
1347 valid_flags = IR3_REG_IMMED | IR3_REG_CONST | IR3_REG_RELATIV;
1348 if (flags & ~valid_flags)
1349 return false;
1350 break;
1351 case 2:
1352 valid_flags = ir3_cat2_absneg(instr->opc) |
1353 IR3_REG_CONST | IR3_REG_RELATIV;
1354
1355 if (ir3_cat2_int(instr->opc))
1356 valid_flags |= IR3_REG_IMMED;
1357
1358 if (flags & ~valid_flags)
1359 return false;
1360
1361 if (flags & (IR3_REG_CONST | IR3_REG_IMMED)) {
1362 unsigned m = (n ^ 1) + 1;
1363 /* cannot deal w/ const in both srcs:
1364 * (note that some cat2 actually only have a single src)
1365 */
1366 if (m < instr->regs_count) {
1367 struct ir3_register *reg = instr->regs[m];
1368 if ((flags & IR3_REG_CONST) && (reg->flags & IR3_REG_CONST))
1369 return false;
1370 if ((flags & IR3_REG_IMMED) && (reg->flags & IR3_REG_IMMED))
1371 return false;
1372 }
1373 }
1374 break;
1375 case 3:
1376 valid_flags = ir3_cat3_absneg(instr->opc) |
1377 IR3_REG_CONST | IR3_REG_RELATIV;
1378
1379 if (flags & ~valid_flags)
1380 return false;
1381
1382 if (flags & (IR3_REG_CONST | IR3_REG_RELATIV)) {
1383 /* cannot deal w/ const/relativ in 2nd src: */
1384 if (n == 1)
1385 return false;
1386 }
1387
1388 break;
1389 case 4:
1390 /* seems like blob compiler avoids const as src.. */
1391 /* TODO double check if this is still the case on a4xx */
1392 if (flags & (IR3_REG_CONST | IR3_REG_IMMED))
1393 return false;
1394 if (flags & (IR3_REG_SABS | IR3_REG_SNEG))
1395 return false;
1396 break;
1397 case 5:
1398 /* no flags allowed */
1399 if (flags)
1400 return false;
1401 break;
1402 case 6:
1403 valid_flags = IR3_REG_IMMED;
1404 if (flags & ~valid_flags)
1405 return false;
1406
1407 if (flags & IR3_REG_IMMED) {
1408 /* doesn't seem like we can have immediate src for store
1409 * instructions:
1410 *
1411 * TODO this restriction could also apply to load instructions,
1412 * but for load instructions this arg is the address (and not
1413 * really sure any good way to test a hard-coded immed addr src)
1414 */
1415 if (is_store(instr) && (n == 1))
1416 return false;
1417
1418 if ((instr->opc == OPC_LDL) && (n == 0))
1419 return false;
1420
1421 if ((instr->opc == OPC_STL) && (n != 2))
1422 return false;
1423
1424 if (instr->opc == OPC_STLW && n == 0)
1425 return false;
1426
1427 if (instr->opc == OPC_LDLW && n == 0)
1428 return false;
1429
1430 /* disallow immediates in anything but the SSBO slot argument for
1431 * cat6 instructions:
1432 */
1433 if (is_atomic(instr->opc) && (n != 0))
1434 return false;
1435
1436 if (is_atomic(instr->opc) && !(instr->flags & IR3_INSTR_G))
1437 return false;
1438
1439 if (instr->opc == OPC_STG && (instr->flags & IR3_INSTR_G) && (n != 2))
1440 return false;
1441
1442 /* as with atomics, these cat6 instrs can only have an immediate
1443 * for SSBO/IBO slot argument
1444 */
1445 switch (instr->opc) {
1446 case OPC_LDIB:
1447 case OPC_LDC:
1448 case OPC_RESINFO:
1449 if (n != 0)
1450 return false;
1451 break;
1452 default:
1453 break;
1454 }
1455 }
1456
1457 break;
1458 }
1459
1460 return true;
1461 }