freedreno/ir3: move ubo_state into const_state
[mesa.git] / src / freedreno / ir3 / ir3.c
1 /*
2 * Copyright (c) 2012 Rob Clark <robdclark@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "ir3.h"
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <string.h>
29 #include <assert.h>
30 #include <stdbool.h>
31 #include <errno.h>
32
33 #include "util/bitscan.h"
34 #include "util/ralloc.h"
35 #include "util/u_math.h"
36
37 #include "instr-a3xx.h"
38 #include "ir3_compiler.h"
39
40 /* simple allocator: for now everything is just rzalloc'd against the ir3,
41 * so that the whole IR can be freed easily in one shot (ir3_destroy()).
42 */
43 void * ir3_alloc(struct ir3 *shader, int sz)
44 {
45 return rzalloc_size(shader, sz); /* TODO: don't use rzalloc */
46 }
47
48 struct ir3 * ir3_create(struct ir3_compiler *compiler, gl_shader_stage type)
49 {
50 struct ir3 *shader = rzalloc(NULL, struct ir3);
51
52 shader->compiler = compiler;
53 shader->type = type;
54
55 list_inithead(&shader->block_list);
56 list_inithead(&shader->array_list);
57
58 return shader;
59 }
60
61 void ir3_destroy(struct ir3 *shader)
62 {
63 ralloc_free(shader);
64 }
65
66 #define iassert(cond) do { \
67 if (!(cond)) { \
68 debug_assert(cond); \
69 return -1; \
70 } } while (0)
71
72 #define iassert_type(reg, full) do { \
73 if ((full)) { \
74 iassert(!((reg)->flags & IR3_REG_HALF)); \
75 } else { \
76 iassert((reg)->flags & IR3_REG_HALF); \
77 } } while (0)
78
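/* Encode a register reference into its instruction field value, complaining
 * if any flags outside of valid_flags are set, and accumulate the const/GPR
 * footprint of the shader into the ir3_info as a side effect:
 */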
79 static uint32_t reg(struct ir3_register *reg, struct ir3_info *info,
80 uint32_t repeat, uint32_t valid_flags)
81 {
82 struct ir3_shader_variant *v = info->data;
83 reg_t val = { .dummy32 = 0 };
84
85 if (reg->flags & ~valid_flags) {
86 debug_printf("INVALID FLAGS: %x vs %x\n",
87 reg->flags, valid_flags);
88 }
89
90 if (!(reg->flags & IR3_REG_R))
91 repeat = 0;
92
93 if (reg->flags & IR3_REG_IMMED) {
94 val.iim_val = reg->iim_val;
95 } else {
96 unsigned components;
97 int16_t max;
98
99 if (reg->flags & IR3_REG_RELATIV) {
100 components = reg->size;
101 val.idummy10 = reg->array.offset;
102 max = (reg->array.offset + repeat + components - 1);
103 } else {
104 components = util_last_bit(reg->wrmask);
105 val.comp = reg->num & 0x3;
106 val.num = reg->num >> 2;
107 max = (reg->num + repeat + components - 1);
108 }
109
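/* Track the highest const and GPR referenced (reg->num encodes rN.c as
 * (N << 2) | c, so >> 2 recovers N).  The dummy r63.x and anything at
 * r48.x or above doesn't count towards max_reg, and with merged registers
 * (a6xx+) a full reg covers two half regs, hence the >> 3 for halves:
 */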
110 if (reg->flags & IR3_REG_CONST) {
111 info->max_const = MAX2(info->max_const, max >> 2);
112 } else if (val.num == 63) {
113 /* ignore writes to dummy register r63.x */
114 } else if (max < regid(48, 0)) {
115 if (reg->flags & IR3_REG_HALF) {
116 if (v->mergedregs) {
117 /* starting w/ a6xx, half regs conflict with full regs: */
118 info->max_reg = MAX2(info->max_reg, max >> 3);
119 } else {
120 info->max_half_reg = MAX2(info->max_half_reg, max >> 2);
121 }
122 } else {
123 info->max_reg = MAX2(info->max_reg, max >> 2);
124 }
125 }
126 }
127
128 return val.dummy32;
129 }
130
131 static int emit_cat0(struct ir3_instruction *instr, void *ptr,
132 struct ir3_info *info)
133 {
134 struct ir3_shader_variant *v = info->data;
135 instr_cat0_t *cat0 = ptr;
136
137 if (v->shader->compiler->gpu_id >= 500) {
138 cat0->a5xx.immed = instr->cat0.immed;
139 } else if (v->shader->compiler->gpu_id >= 400) {
140 cat0->a4xx.immed = instr->cat0.immed;
141 } else {
142 cat0->a3xx.immed = instr->cat0.immed;
143 }
144 cat0->repeat = instr->repeat;
145 cat0->ss = !!(instr->flags & IR3_INSTR_SS);
146 cat0->inv0 = instr->cat0.inv;
147 cat0->comp0 = instr->cat0.comp;
148 cat0->opc = instr->opc;
149 cat0->opc_hi = instr->opc >= 16;
150 cat0->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
151 cat0->sync = !!(instr->flags & IR3_INSTR_SY);
152 cat0->opc_cat = 0;
153
154 return 0;
155 }
156
157 static int emit_cat1(struct ir3_instruction *instr, void *ptr,
158 struct ir3_info *info)
159 {
160 struct ir3_register *dst = instr->regs[0];
161 struct ir3_register *src = instr->regs[1];
162 instr_cat1_t *cat1 = ptr;
163
164 iassert(instr->regs_count == 2);
165 iassert_type(dst, type_size(instr->cat1.dst_type) == 32);
166 if (!(src->flags & IR3_REG_IMMED))
167 iassert_type(src, type_size(instr->cat1.src_type) == 32);
168
169 if (src->flags & IR3_REG_IMMED) {
170 cat1->iim_val = src->iim_val;
171 cat1->src_im = 1;
172 } else if (src->flags & IR3_REG_RELATIV) {
173 cat1->off = reg(src, info, instr->repeat,
174 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF | IR3_REG_RELATIV);
175 cat1->src_rel = 1;
176 cat1->src_rel_c = !!(src->flags & IR3_REG_CONST);
177 } else {
178 cat1->src = reg(src, info, instr->repeat,
179 IR3_REG_R | IR3_REG_CONST | IR3_REG_HALF);
180 cat1->src_c = !!(src->flags & IR3_REG_CONST);
181 }
182
183 cat1->dst = reg(dst, info, instr->repeat,
184 IR3_REG_RELATIV | IR3_REG_EVEN |
185 IR3_REG_R | IR3_REG_POS_INF | IR3_REG_HALF);
186 cat1->repeat = instr->repeat;
187 cat1->src_r = !!(src->flags & IR3_REG_R);
188 cat1->ss = !!(instr->flags & IR3_INSTR_SS);
189 cat1->ul = !!(instr->flags & IR3_INSTR_UL);
190 cat1->dst_type = instr->cat1.dst_type;
191 cat1->dst_rel = !!(dst->flags & IR3_REG_RELATIV);
192 cat1->src_type = instr->cat1.src_type;
193 cat1->even = !!(dst->flags & IR3_REG_EVEN);
194 cat1->pos_inf = !!(dst->flags & IR3_REG_POS_INF);
195 cat1->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
196 cat1->sync = !!(instr->flags & IR3_INSTR_SY);
197 cat1->opc_cat = 1;
198
199 return 0;
200 }
201
202 static int emit_cat2(struct ir3_instruction *instr, void *ptr,
203 struct ir3_info *info)
204 {
205 struct ir3_register *dst = instr->regs[0];
206 struct ir3_register *src1 = instr->regs[1];
207 struct ir3_register *src2 = instr->regs[2];
208 instr_cat2_t *cat2 = ptr;
209 unsigned absneg = ir3_cat2_absneg(instr->opc);
210
211 iassert((instr->regs_count == 2) || (instr->regs_count == 3));
212
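/* When (rpt) is not used, the src1_r/src2_r bits are instead used to
 * encode up to three nop cycles executed after this instruction:
 */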
213 if (instr->nop) {
214 iassert(!instr->repeat);
215 iassert(instr->nop <= 3);
216
217 cat2->src1_r = instr->nop & 0x1;
218 cat2->src2_r = (instr->nop >> 1) & 0x1;
219 } else {
220 cat2->src1_r = !!(src1->flags & IR3_REG_R);
221 if (src2)
222 cat2->src2_r = !!(src2->flags & IR3_REG_R);
223 }
224
225 if (src1->flags & IR3_REG_RELATIV) {
226 iassert(src1->array.offset < (1 << 10));
227 cat2->rel1.src1 = reg(src1, info, instr->repeat,
228 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
229 IR3_REG_HALF | absneg);
230 cat2->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
231 cat2->rel1.src1_rel = 1;
232 } else if (src1->flags & IR3_REG_CONST) {
233 iassert(src1->num < (1 << 12));
234 cat2->c1.src1 = reg(src1, info, instr->repeat,
235 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
236 absneg);
237 cat2->c1.src1_c = 1;
238 } else {
239 iassert(src1->num < (1 << 11));
240 cat2->src1 = reg(src1, info, instr->repeat,
241 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
242 absneg);
243 }
244 cat2->src1_im = !!(src1->flags & IR3_REG_IMMED);
245 cat2->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
246 cat2->src1_abs = !!(src1->flags & (IR3_REG_FABS | IR3_REG_SABS));
247
248 if (src2) {
249 iassert((src2->flags & IR3_REG_IMMED) ||
250 !((src1->flags ^ src2->flags) & IR3_REG_HALF));
251
252 if (src2->flags & IR3_REG_RELATIV) {
253 iassert(src2->array.offset < (1 << 10));
254 cat2->rel2.src2 = reg(src2, info, instr->repeat,
255 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
256 IR3_REG_HALF | absneg);
257 cat2->rel2.src2_c = !!(src2->flags & IR3_REG_CONST);
258 cat2->rel2.src2_rel = 1;
259 } else if (src2->flags & IR3_REG_CONST) {
260 iassert(src2->num < (1 << 12));
261 cat2->c2.src2 = reg(src2, info, instr->repeat,
262 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF |
263 absneg);
264 cat2->c2.src2_c = 1;
265 } else {
266 iassert(src2->num < (1 << 11));
267 cat2->src2 = reg(src2, info, instr->repeat,
268 IR3_REG_IMMED | IR3_REG_R | IR3_REG_HALF |
269 absneg);
270 }
271
272 cat2->src2_im = !!(src2->flags & IR3_REG_IMMED);
273 cat2->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
274 cat2->src2_abs = !!(src2->flags & (IR3_REG_FABS | IR3_REG_SABS));
275 }
276
277 cat2->dst = reg(dst, info, instr->repeat,
278 IR3_REG_R | IR3_REG_EI | IR3_REG_HALF);
279 cat2->repeat = instr->repeat;
280 cat2->sat = !!(instr->flags & IR3_INSTR_SAT);
281 cat2->ss = !!(instr->flags & IR3_INSTR_SS);
282 cat2->ul = !!(instr->flags & IR3_INSTR_UL);
283 cat2->dst_half = !!((src1->flags ^ dst->flags) & IR3_REG_HALF);
284 cat2->ei = !!(dst->flags & IR3_REG_EI);
285 cat2->cond = instr->cat2.condition;
286 cat2->full = ! (src1->flags & IR3_REG_HALF);
287 cat2->opc = instr->opc;
288 cat2->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
289 cat2->sync = !!(instr->flags & IR3_INSTR_SY);
290 cat2->opc_cat = 2;
291
292 return 0;
293 }
294
295 static int emit_cat3(struct ir3_instruction *instr, void *ptr,
296 struct ir3_info *info)
297 {
298 struct ir3_register *dst = instr->regs[0];
299 struct ir3_register *src1 = instr->regs[1];
300 struct ir3_register *src2 = instr->regs[2];
301 struct ir3_register *src3 = instr->regs[3];
302 unsigned absneg = ir3_cat3_absneg(instr->opc);
303 instr_cat3_t *cat3 = ptr;
304 uint32_t src_flags = 0;
305
306 switch (instr->opc) {
307 case OPC_MAD_F16:
308 case OPC_MAD_U16:
309 case OPC_MAD_S16:
310 case OPC_SEL_B16:
311 case OPC_SEL_S16:
312 case OPC_SEL_F16:
313 case OPC_SAD_S16:
314 case OPC_SAD_S32: // really??
315 src_flags |= IR3_REG_HALF;
316 break;
317 default:
318 break;
319 }
320
321 iassert(instr->regs_count == 4);
322 iassert(!((src1->flags ^ src_flags) & IR3_REG_HALF));
323 iassert(!((src2->flags ^ src_flags) & IR3_REG_HALF));
324 iassert(!((src3->flags ^ src_flags) & IR3_REG_HALF));
325
326 if (instr->nop) {
327 iassert(!instr->repeat);
328 iassert(instr->nop <= 3);
329
330 cat3->src1_r = instr->nop & 0x1;
331 cat3->src2_r = (instr->nop >> 1) & 0x1;
332 } else {
333 cat3->src1_r = !!(src1->flags & IR3_REG_R);
334 cat3->src2_r = !!(src2->flags & IR3_REG_R);
335 }
336
337 if (src1->flags & IR3_REG_RELATIV) {
338 iassert(src1->array.offset < (1 << 10));
339 cat3->rel1.src1 = reg(src1, info, instr->repeat,
340 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
341 IR3_REG_HALF | absneg);
342 cat3->rel1.src1_c = !!(src1->flags & IR3_REG_CONST);
343 cat3->rel1.src1_rel = 1;
344 } else if (src1->flags & IR3_REG_CONST) {
345 iassert(src1->num < (1 << 12));
346 cat3->c1.src1 = reg(src1, info, instr->repeat,
347 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
348 cat3->c1.src1_c = 1;
349 } else {
350 iassert(src1->num < (1 << 11));
351 cat3->src1 = reg(src1, info, instr->repeat,
352 IR3_REG_R | IR3_REG_HALF | absneg);
353 }
354
355 cat3->src1_neg = !!(src1->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
356
357 cat3->src2 = reg(src2, info, instr->repeat,
358 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
359 cat3->src2_c = !!(src2->flags & IR3_REG_CONST);
360 cat3->src2_neg = !!(src2->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
361
362 if (src3->flags & IR3_REG_RELATIV) {
363 iassert(src3->array.offset < (1 << 10));
364 cat3->rel2.src3 = reg(src3, info, instr->repeat,
365 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_R |
366 IR3_REG_HALF | absneg);
367 cat3->rel2.src3_c = !!(src3->flags & IR3_REG_CONST);
368 cat3->rel2.src3_rel = 1;
369 } else if (src3->flags & IR3_REG_CONST) {
370 iassert(src3->num < (1 << 12));
371 cat3->c2.src3 = reg(src3, info, instr->repeat,
372 IR3_REG_CONST | IR3_REG_R | IR3_REG_HALF | absneg);
373 cat3->c2.src3_c = 1;
374 } else {
375 iassert(src3->num < (1 << 11));
376 cat3->src3 = reg(src3, info, instr->repeat,
377 IR3_REG_R | IR3_REG_HALF | absneg);
378 }
379
380 cat3->src3_neg = !!(src3->flags & (IR3_REG_FNEG | IR3_REG_SNEG | IR3_REG_BNOT));
381 cat3->src3_r = !!(src3->flags & IR3_REG_R);
382
383 cat3->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
384 cat3->repeat = instr->repeat;
385 cat3->sat = !!(instr->flags & IR3_INSTR_SAT);
386 cat3->ss = !!(instr->flags & IR3_INSTR_SS);
387 cat3->ul = !!(instr->flags & IR3_INSTR_UL);
388 cat3->dst_half = !!((src_flags ^ dst->flags) & IR3_REG_HALF);
389 cat3->opc = instr->opc;
390 cat3->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
391 cat3->sync = !!(instr->flags & IR3_INSTR_SY);
392 cat3->opc_cat = 3;
393
394 return 0;
395 }
396
397 static int emit_cat4(struct ir3_instruction *instr, void *ptr,
398 struct ir3_info *info)
399 {
400 struct ir3_register *dst = instr->regs[0];
401 struct ir3_register *src = instr->regs[1];
402 instr_cat4_t *cat4 = ptr;
403
404 iassert(instr->regs_count == 2);
405
406 if (src->flags & IR3_REG_RELATIV) {
407 iassert(src->array.offset < (1 << 10));
408 cat4->rel.src = reg(src, info, instr->repeat,
409 IR3_REG_RELATIV | IR3_REG_CONST | IR3_REG_FNEG |
410 IR3_REG_FABS | IR3_REG_R | IR3_REG_HALF);
411 cat4->rel.src_c = !!(src->flags & IR3_REG_CONST);
412 cat4->rel.src_rel = 1;
413 } else if (src->flags & IR3_REG_CONST) {
414 iassert(src->num < (1 << 12));
415 cat4->c.src = reg(src, info, instr->repeat,
416 IR3_REG_CONST | IR3_REG_FNEG | IR3_REG_FABS |
417 IR3_REG_R | IR3_REG_HALF);
418 cat4->c.src_c = 1;
419 } else {
420 iassert(src->num < (1 << 11));
421 cat4->src = reg(src, info, instr->repeat,
422 IR3_REG_IMMED | IR3_REG_FNEG | IR3_REG_FABS |
423 IR3_REG_R | IR3_REG_HALF);
424 }
425
426 cat4->src_im = !!(src->flags & IR3_REG_IMMED);
427 cat4->src_neg = !!(src->flags & IR3_REG_FNEG);
428 cat4->src_abs = !!(src->flags & IR3_REG_FABS);
429 cat4->src_r = !!(src->flags & IR3_REG_R);
430
431 cat4->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
432 cat4->repeat = instr->repeat;
433 cat4->sat = !!(instr->flags & IR3_INSTR_SAT);
434 cat4->ss = !!(instr->flags & IR3_INSTR_SS);
435 cat4->ul = !!(instr->flags & IR3_INSTR_UL);
436 cat4->dst_half = !!((src->flags ^ dst->flags) & IR3_REG_HALF);
437 cat4->full = ! (src->flags & IR3_REG_HALF);
438 cat4->opc = instr->opc;
439 cat4->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
440 cat4->sync = !!(instr->flags & IR3_INSTR_SY);
441 cat4->opc_cat = 4;
442
443 return 0;
444 }
445
446 static int emit_cat5(struct ir3_instruction *instr, void *ptr,
447 struct ir3_info *info)
448 {
449 struct ir3_register *dst = instr->regs[0];
450 /* To simplify things when there could be zero, one, or two args other
451 * than tex/sampler idx, we use the first src reg in the ir to hold
452 * samp_tex hvec2:
453 */
454 struct ir3_register *src1;
455 struct ir3_register *src2;
456 instr_cat5_t *cat5 = ptr;
457
458 iassert((instr->regs_count == 1) ||
459 (instr->regs_count == 2) ||
460 (instr->regs_count == 3) ||
461 (instr->regs_count == 4));
462
463 if (instr->flags & IR3_INSTR_S2EN) {
464 src1 = instr->regs[2];
465 src2 = instr->regs_count > 3 ? instr->regs[3] : NULL;
466 } else {
467 src1 = instr->regs_count > 1 ? instr->regs[1] : NULL;
468 src2 = instr->regs_count > 2 ? instr->regs[2] : NULL;
469 }
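/* with S2EN the samp/tex index comes from the samp_tex hvec2 in regs[1]
 * (see comment above), so any further srcs are shifted up by one slot:
 */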
470
471 assume(src1 || !src2);
472
473 if (src1) {
474 cat5->full = ! (src1->flags & IR3_REG_HALF);
475 cat5->src1 = reg(src1, info, instr->repeat, IR3_REG_HALF);
476 }
477
478 if (src2) {
479 iassert(!((src1->flags ^ src2->flags) & IR3_REG_HALF));
480 cat5->src2 = reg(src2, info, instr->repeat, IR3_REG_HALF);
481 }
482
483 if (instr->flags & IR3_INSTR_B) {
484 cat5->s2en_bindless.base_hi = instr->cat5.tex_base >> 1;
485 cat5->base_lo = instr->cat5.tex_base & 1;
486 }
487
488 if (instr->flags & IR3_INSTR_S2EN) {
489 struct ir3_register *samp_tex = instr->regs[1];
490 iassert(samp_tex->flags & IR3_REG_HALF);
491 cat5->s2en_bindless.src3 = reg(samp_tex, info, instr->repeat,
492 (instr->flags & IR3_INSTR_B) ? 0 : IR3_REG_HALF);
493 if (instr->flags & IR3_INSTR_B) {
494 if (instr->flags & IR3_INSTR_A1EN) {
495 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_UNIFORM;
496 } else {
497 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_UNIFORM;
498 }
499 } else {
500 /* TODO: This should probably be CAT5_UNIFORM, at least on a6xx,
501 * as this is what the blob does and it is presumably faster, but
502 * first we should confirm it is actually nonuniform and figure
503 * out when the whole descriptor mode mechanism was introduced.
504 */
505 cat5->s2en_bindless.desc_mode = CAT5_NONUNIFORM;
506 }
507 iassert(!(instr->cat5.samp | instr->cat5.tex));
508 } else if (instr->flags & IR3_INSTR_B) {
509 cat5->s2en_bindless.src3 = instr->cat5.samp;
510 if (instr->flags & IR3_INSTR_A1EN) {
511 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_IMM;
512 } else {
513 cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_IMM;
514 }
515 } else {
516 cat5->norm.samp = instr->cat5.samp;
517 cat5->norm.tex = instr->cat5.tex;
518 }
519
520 cat5->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
521 cat5->wrmask = dst->wrmask;
522 cat5->type = instr->cat5.type;
523 cat5->is_3d = !!(instr->flags & IR3_INSTR_3D);
524 cat5->is_a = !!(instr->flags & IR3_INSTR_A);
525 cat5->is_s = !!(instr->flags & IR3_INSTR_S);
526 cat5->is_s2en_bindless = !!(instr->flags & (IR3_INSTR_S2EN | IR3_INSTR_B));
527 cat5->is_o = !!(instr->flags & IR3_INSTR_O);
528 cat5->is_p = !!(instr->flags & IR3_INSTR_P);
529 cat5->opc = instr->opc;
530 cat5->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
531 cat5->sync = !!(instr->flags & IR3_INSTR_SY);
532 cat5->opc_cat = 5;
533
534 return 0;
535 }
536
537 static int emit_cat6_a6xx(struct ir3_instruction *instr, void *ptr,
538 struct ir3_info *info)
539 {
540 struct ir3_register *ssbo;
541 instr_cat6_a6xx_t *cat6 = ptr;
542
543 ssbo = instr->regs[1];
544
545 cat6->type = instr->cat6.type;
546 cat6->d = instr->cat6.d - (instr->opc == OPC_LDC ? 0 : 1);
547 cat6->typed = instr->cat6.typed;
548 cat6->type_size = instr->cat6.iim_val - 1;
549 cat6->opc = instr->opc;
550 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
551 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
552 cat6->opc_cat = 6;
553
554 cat6->ssbo = reg(ssbo, info, instr->repeat, IR3_REG_IMMED);
555
556 /* For unused sources in an opcode, initialize contents with the ir3 dest
557 * reg
558 */
559 switch (instr->opc) {
560 case OPC_RESINFO:
561 cat6->src1 = reg(instr->regs[0], info, instr->repeat, 0);
562 cat6->src2 = reg(instr->regs[0], info, instr->repeat, 0);
563 break;
564 case OPC_LDC:
565 case OPC_LDIB:
566 cat6->src1 = reg(instr->regs[2], info, instr->repeat, 0);
567 cat6->src2 = reg(instr->regs[0], info, instr->repeat, 0);
568 break;
569 default:
570 cat6->src1 = reg(instr->regs[2], info, instr->repeat, 0);
571 cat6->src2 = reg(instr->regs[3], info, instr->repeat, 0);
572 break;
573 }
574
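/* desc_mode selects whether the 'ssbo' src is an immediate SSBO/IBO index
 * or comes from a register; the bindless variants additionally use
 * cat6->base to pick the bindless descriptor base:
 */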
575 if (instr->flags & IR3_INSTR_B) {
576 if (ssbo->flags & IR3_REG_IMMED) {
577 cat6->desc_mode = CAT6_BINDLESS_IMM;
578 } else {
579 cat6->desc_mode = CAT6_BINDLESS_UNIFORM;
580 }
581 cat6->base = instr->cat6.base;
582 } else {
583 if (ssbo->flags & IR3_REG_IMMED)
584 cat6->desc_mode = CAT6_IMM;
585 else
586 cat6->desc_mode = CAT6_UNIFORM;
587 }
588
589 switch (instr->opc) {
590 case OPC_ATOMIC_ADD:
591 case OPC_ATOMIC_SUB:
592 case OPC_ATOMIC_XCHG:
593 case OPC_ATOMIC_INC:
594 case OPC_ATOMIC_DEC:
595 case OPC_ATOMIC_CMPXCHG:
596 case OPC_ATOMIC_MIN:
597 case OPC_ATOMIC_MAX:
598 case OPC_ATOMIC_AND:
599 case OPC_ATOMIC_OR:
600 case OPC_ATOMIC_XOR:
601 cat6->pad1 = 0x1;
602 cat6->pad3 = 0xc;
603 cat6->pad5 = 0x3;
604 break;
605 case OPC_STIB:
606 cat6->pad1 = 0x0;
607 cat6->pad3 = 0xc;
608 cat6->pad5 = 0x2;
609 break;
610 case OPC_LDIB:
611 case OPC_RESINFO:
612 cat6->pad1 = 0x1;
613 cat6->pad3 = 0xc;
614 cat6->pad5 = 0x2;
615 break;
616 case OPC_LDC:
617 cat6->pad1 = 0x0;
618 cat6->pad3 = 0x8;
619 cat6->pad5 = 0x2;
620 break;
621 default:
622 iassert(0);
623 }
624 cat6->pad2 = 0x0;
625 cat6->pad4 = 0x0;
626
627 return 0;
628 }
629
630 static int emit_cat6(struct ir3_instruction *instr, void *ptr,
631 struct ir3_info *info)
632 {
633 struct ir3_shader_variant *v = info->data;
634 struct ir3_register *dst, *src1, *src2;
635 instr_cat6_t *cat6 = ptr;
636
637 /* In a6xx we start using a new instruction encoding for some of
638 * these instructions:
639 */
640 if (v->shader->compiler->gpu_id >= 600) {
641 switch (instr->opc) {
642 case OPC_ATOMIC_ADD:
643 case OPC_ATOMIC_SUB:
644 case OPC_ATOMIC_XCHG:
645 case OPC_ATOMIC_INC:
646 case OPC_ATOMIC_DEC:
647 case OPC_ATOMIC_CMPXCHG:
648 case OPC_ATOMIC_MIN:
649 case OPC_ATOMIC_MAX:
650 case OPC_ATOMIC_AND:
651 case OPC_ATOMIC_OR:
652 case OPC_ATOMIC_XOR:
653 /* The shared variants of these still use the old encoding: */
654 if (!(instr->flags & IR3_INSTR_G))
655 break;
656 /* fallthrough */
657 case OPC_STIB:
658 case OPC_LDIB:
659 case OPC_LDC:
660 case OPC_RESINFO:
661 return emit_cat6_a6xx(instr, ptr, info);
662 default:
663 break;
664 }
665 }
666
667 bool type_full = type_size(instr->cat6.type) == 32;
668
669 cat6->type = instr->cat6.type;
670 cat6->opc = instr->opc;
671 cat6->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
672 cat6->sync = !!(instr->flags & IR3_INSTR_SY);
673 cat6->g = !!(instr->flags & IR3_INSTR_G);
674 cat6->opc_cat = 6;
675
676 switch (instr->opc) {
677 case OPC_RESINFO:
678 case OPC_RESFMT:
679 iassert_type(instr->regs[0], type_full); /* dst */
680 iassert_type(instr->regs[1], type_full); /* src1 */
681 break;
682 case OPC_L2G:
683 case OPC_G2L:
684 iassert_type(instr->regs[0], true); /* dst */
685 iassert_type(instr->regs[1], true); /* src1 */
686 break;
687 case OPC_STG:
688 case OPC_STL:
689 case OPC_STP:
690 case OPC_STLW:
691 case OPC_STIB:
692 /* no dst, so regs[0] is dummy */
693 iassert_type(instr->regs[1], true); /* dst */
694 iassert_type(instr->regs[2], type_full); /* src1 */
695 iassert_type(instr->regs[3], true); /* src2 */
696 break;
697 default:
698 iassert_type(instr->regs[0], type_full); /* dst */
699 iassert_type(instr->regs[1], true); /* src1 */
700 if (instr->regs_count > 2)
701 iassert_type(instr->regs[2], true); /* src2 */
702 break;
703 }
704
705 /* the "dst" for a store instruction is (from the perspective
706 * of data flow in the shader, ie. register use/def, etc) in
707 * fact a register that is read by the instruction, rather
708 * than written:
709 */
710 if (is_store(instr)) {
711 iassert(instr->regs_count >= 3);
712
713 dst = instr->regs[1];
714 src1 = instr->regs[2];
715 src2 = (instr->regs_count >= 4) ? instr->regs[3] : NULL;
716 } else {
717 iassert(instr->regs_count >= 2);
718
719 dst = instr->regs[0];
720 src1 = instr->regs[1];
721 src2 = (instr->regs_count >= 3) ? instr->regs[2] : NULL;
722 }
723
724 /* TODO we need a more comprehensive list about which instructions
725 * can be encoded which way. Or possibly use IR3_INSTR_0 flag to
726 * indicate to use the src_off encoding even if offset is zero
727 * (but then what to do about dst_off?)
728 */
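/* NOTE: cat6 and the ldgb/stgb/cat6a/cat6b/cat6c/cat6d views used below
 * all alias the same 64b instruction word (ptr), they are just different
 * layouts of it:
 */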
729 if (is_atomic(instr->opc)) {
730 instr_cat6ldgb_t *ldgb = ptr;
731
732 /* maybe these two bits both determine the instruction encoding? */
733 cat6->src_off = false;
734
735 ldgb->d = instr->cat6.d - 1;
736 ldgb->typed = instr->cat6.typed;
737 ldgb->type_size = instr->cat6.iim_val - 1;
738
739 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
740
741 if (ldgb->g) {
742 struct ir3_register *src3 = instr->regs[3];
743 struct ir3_register *src4 = instr->regs[4];
744
745 /* first src is src_ssbo: */
746 iassert(src1->flags & IR3_REG_IMMED);
747 ldgb->src_ssbo = src1->uim_val;
748 ldgb->src_ssbo_im = 0x1;
749
750 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
751 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
752 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
753 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
754
755 ldgb->src3 = reg(src4, info, instr->repeat, 0);
756 ldgb->pad0 = 0x1;
757 } else {
758 ldgb->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
759 ldgb->src1_im = !!(src1->flags & IR3_REG_IMMED);
760 ldgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
761 ldgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
762 ldgb->pad0 = 0x1;
763 ldgb->src_ssbo_im = 0x0;
764 }
765
766 return 0;
767 } else if (instr->opc == OPC_LDGB) {
768 struct ir3_register *src3 = instr->regs[3];
769 instr_cat6ldgb_t *ldgb = ptr;
770
771 /* maybe these two bits both determine the instruction encoding? */
772 cat6->src_off = false;
773
774 ldgb->d = instr->cat6.d - 1;
775 ldgb->typed = instr->cat6.typed;
776 ldgb->type_size = instr->cat6.iim_val - 1;
777
778 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
779
780 /* first src is src_ssbo: */
781 iassert(src1->flags & IR3_REG_IMMED);
782 ldgb->src_ssbo = src1->uim_val;
783
784 /* then next two are src1/src2: */
785 ldgb->src1 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
786 ldgb->src1_im = !!(src2->flags & IR3_REG_IMMED);
787 ldgb->src2 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
788 ldgb->src2_im = !!(src3->flags & IR3_REG_IMMED);
789
790 ldgb->pad0 = 0x0;
791 ldgb->src_ssbo_im = true;
792
793 return 0;
794 } else if (instr->opc == OPC_RESINFO) {
795 instr_cat6ldgb_t *ldgb = ptr;
796
797 ldgb->d = instr->cat6.d - 1;
798
799 ldgb->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
800
801 /* first src is src_ssbo: */
802 ldgb->src_ssbo = reg(src1, info, instr->repeat, IR3_REG_IMMED);
803 ldgb->src_ssbo_im = !!(src1->flags & IR3_REG_IMMED);
804
805 return 0;
806 } else if ((instr->opc == OPC_STGB) || (instr->opc == OPC_STIB)) {
807 struct ir3_register *src3 = instr->regs[4];
808 instr_cat6stgb_t *stgb = ptr;
809
810 /* maybe these two bits both determine the instruction encoding? */
811 cat6->src_off = true;
812 stgb->pad3 = 0x2;
813
814 stgb->d = instr->cat6.d - 1;
815 stgb->typed = instr->cat6.typed;
816 stgb->type_size = instr->cat6.iim_val - 1;
817
818 /* first src is dst_ssbo: */
819 iassert(dst->flags & IR3_REG_IMMED);
820 stgb->dst_ssbo = dst->uim_val;
821
822 /* then src1/src2/src3: */
823 stgb->src1 = reg(src1, info, instr->repeat, 0);
824 stgb->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
825 stgb->src2_im = !!(src2->flags & IR3_REG_IMMED);
826 stgb->src3 = reg(src3, info, instr->repeat, IR3_REG_IMMED);
827 stgb->src3_im = !!(src3->flags & IR3_REG_IMMED);
828
829 return 0;
830 } else if (instr->cat6.src_offset || (instr->opc == OPC_LDG) ||
831 (instr->opc == OPC_LDL) || (instr->opc == OPC_LDLW)) {
832 struct ir3_register *src3 = instr->regs[3];
833 instr_cat6a_t *cat6a = ptr;
834
835 cat6->src_off = true;
836
837 if (instr->opc == OPC_LDG) {
838 /* For LDG src1 cannot be immediate, so the src1_im bit is redundant and
839 * is instead used to signal whether 'off' is a 32 bit register (src1_im
840 * set) or an immediate offset (src1_im clear).
841 */
842 cat6a->src1 = reg(src1, info, instr->repeat, 0);
843 cat6a->src1_im = !(src3->flags & IR3_REG_IMMED);
844 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
845 } else {
846 cat6a->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED);
847 cat6a->src1_im = !!(src1->flags & IR3_REG_IMMED);
848 cat6a->off = reg(src3, info, instr->repeat, IR3_REG_IMMED);
849 iassert(src3->flags & IR3_REG_IMMED);
850 }
851
852 /* Num components */
853 cat6a->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
854 cat6a->src2_im = true;
855 } else {
856 instr_cat6b_t *cat6b = ptr;
857
858 cat6->src_off = false;
859
860 cat6b->src1 = reg(src1, info, instr->repeat, IR3_REG_IMMED | IR3_REG_HALF);
861 cat6b->src1_im = !!(src1->flags & IR3_REG_IMMED);
862 if (src2) {
863 cat6b->src2 = reg(src2, info, instr->repeat, IR3_REG_IMMED);
864 cat6b->src2_im = !!(src2->flags & IR3_REG_IMMED);
865 }
866 }
867
868 if (instr->cat6.dst_offset || (instr->opc == OPC_STG) ||
869 (instr->opc == OPC_STL) || (instr->opc == OPC_STLW)) {
870 instr_cat6c_t *cat6c = ptr;
871 cat6->dst_off = true;
872 cat6c->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
873
874 if (instr->flags & IR3_INSTR_G) {
875 struct ir3_register *src3 = instr->regs[4];
876 cat6c->off = reg(src3, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
877 if (src3->flags & IR3_REG_IMMED) {
878 /* Immediate offsets are in bytes... */
879 cat6->g = false;
880 cat6c->off *= 4;
881 }
882 } else {
883 cat6c->off = instr->cat6.dst_offset;
884 }
885 } else {
886 instr_cat6d_t *cat6d = ptr;
887 cat6->dst_off = false;
888 cat6d->dst = reg(dst, info, instr->repeat, IR3_REG_R | IR3_REG_HALF);
889 }
890
891 return 0;
892 }
893
894 static int emit_cat7(struct ir3_instruction *instr, void *ptr,
895 struct ir3_info *info)
896 {
897 instr_cat7_t *cat7 = ptr;
898
899 cat7->ss = !!(instr->flags & IR3_INSTR_SS);
900 cat7->w = instr->cat7.w;
901 cat7->r = instr->cat7.r;
902 cat7->l = instr->cat7.l;
903 cat7->g = instr->cat7.g;
904 cat7->opc = instr->opc;
905 cat7->jmp_tgt = !!(instr->flags & IR3_INSTR_JP);
906 cat7->sync = !!(instr->flags & IR3_INSTR_SY);
907 cat7->opc_cat = 7;
908
909 return 0;
910 }
911
912 static int (*emit[])(struct ir3_instruction *instr, void *ptr,
913 struct ir3_info *info) = {
914 emit_cat0, emit_cat1, emit_cat2, emit_cat3, emit_cat4, emit_cat5, emit_cat6,
915 emit_cat7,
916 };
917
918 void * ir3_assemble(struct ir3_shader_variant *v)
919 {
920 uint32_t *ptr, *dwords;
921 struct ir3_info *info = &v->info;
922 struct ir3 *shader = v->ir;
923
924 memset(info, 0, sizeof(*info));
925 info->data = v;
926 info->max_reg = -1;
927 info->max_half_reg = -1;
928 info->max_const = -1;
929
930 foreach_block (block, &shader->block_list) {
931 foreach_instr (instr, &block->instr_list) {
932 info->sizedwords += 2;
933 }
934 }
935
936 /* need an integer number of instruction "groups" (sets of 16
937 * instructions on a4xx or sets of 4 instructions on a3xx),
938 * so pad out w/ NOPs if needed: (NOTE each instruction is 64bits)
939 */
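/* (e.g. on a4xx+, 70 instructions (140 dwords) pad out to 80 instructions) */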
940 if (v->shader->compiler->gpu_id >= 400) {
941 info->sizedwords = align(info->sizedwords, 16 * 2);
942 } else {
943 info->sizedwords = align(info->sizedwords, 4 * 2);
944 }
945
946 ptr = dwords = calloc(4, info->sizedwords);
947
948 foreach_block (block, &shader->block_list) {
949 unsigned sfu_delay = 0;
950
951 foreach_instr (instr, &block->instr_list) {
952 int ret = emit[opc_cat(instr->opc)](instr, dwords, info);
953 if (ret)
954 goto fail;
955
956 if ((instr->opc == OPC_BARY_F) && (instr->regs[0]->flags & IR3_REG_EI))
957 info->last_baryf = info->instrs_count;
958
959 info->instrs_count += 1 + instr->repeat + instr->nop;
960 info->nops_count += instr->nop;
961 if (instr->opc == OPC_NOP)
962 info->nops_count += 1 + instr->repeat;
963 if (instr->opc == OPC_MOV) {
964 if (instr->cat1.src_type == instr->cat1.dst_type) {
965 info->mov_count += 1 + instr->repeat;
966 } else {
967 info->cov_count += 1 + instr->repeat;
968 }
969 }
970 dwords += 2;
971
972 if (instr->flags & IR3_INSTR_SS) {
973 info->ss++;
974 info->sstall += sfu_delay;
975 }
976
977 if (instr->flags & IR3_INSTR_SY)
978 info->sy++;
979
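/* rough estimate of cycles stalled on (ss): assume results from SFU
 * instructions take ~10 cycles, counting down as later instrs issue:
 */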
980 if (is_sfu(instr)) {
981 sfu_delay = 10;
982 } else if (sfu_delay > 0) {
983 sfu_delay--;
984 }
985 }
986 }
987
988 return ptr;
989
990 fail:
991 free(ptr);
992 return NULL;
993 }
994
995 static struct ir3_register * reg_create(struct ir3 *shader,
996 int num, int flags)
997 {
998 struct ir3_register *reg =
999 ir3_alloc(shader, sizeof(struct ir3_register));
1000 reg->wrmask = 1;
1001 reg->flags = flags;
1002 reg->num = num;
1003 return reg;
1004 }
1005
1006 static void insert_instr(struct ir3_block *block,
1007 struct ir3_instruction *instr)
1008 {
1009 struct ir3 *shader = block->shader;
1010 #ifdef DEBUG
1011 instr->serialno = ++shader->instr_count;
1012 #endif
1013 list_addtail(&instr->node, &block->instr_list);
1014
1015 if (is_input(instr))
1016 array_insert(shader, shader->baryfs, instr);
1017 }
1018
1019 struct ir3_block * ir3_block_create(struct ir3 *shader)
1020 {
1021 struct ir3_block *block = ir3_alloc(shader, sizeof(*block));
1022 #ifdef DEBUG
1023 block->serialno = ++shader->block_count;
1024 #endif
1025 block->shader = shader;
1026 list_inithead(&block->node);
1027 list_inithead(&block->instr_list);
1028 block->predecessors = _mesa_pointer_set_create(block);
1029 return block;
1030 }
1031
1032 static struct ir3_instruction *instr_create(struct ir3_block *block, int nreg)
1033 {
1034 struct ir3_instruction *instr;
1035 unsigned sz = sizeof(*instr) + (nreg * sizeof(instr->regs[0]));
1036 char *ptr = ir3_alloc(block->shader, sz);
1037
1038 instr = (struct ir3_instruction *)ptr;
1039 ptr += sizeof(*instr);
1040 instr->regs = (struct ir3_register **)ptr;
1041
1042 #ifdef DEBUG
1043 instr->regs_max = nreg;
1044 #endif
1045
1046 return instr;
1047 }
1048
1049 struct ir3_instruction * ir3_instr_create2(struct ir3_block *block,
1050 opc_t opc, int nreg)
1051 {
1052 struct ir3_instruction *instr = instr_create(block, nreg);
1053 instr->block = block;
1054 instr->opc = opc;
1055 insert_instr(block, instr);
1056 return instr;
1057 }
1058
1059 struct ir3_instruction * ir3_instr_create(struct ir3_block *block, opc_t opc)
1060 {
1061 /* NOTE: we could be slightly more clever, at least for non-meta,
1062 * and choose # of regs based on category.
1063 */
1064 return ir3_instr_create2(block, opc, 4);
1065 }
1066
1067 struct ir3_instruction * ir3_instr_clone(struct ir3_instruction *instr)
1068 {
1069 struct ir3_instruction *new_instr = instr_create(instr->block,
1070 instr->regs_count);
1071 struct ir3_register **regs;
1072 unsigned i;
1073
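/* struct-assign the whole instruction, but keep the regs[] array that
 * instr_create() allocated for the clone (the assignment would otherwise
 * leave it pointing at the original's array):
 */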
1074 regs = new_instr->regs;
1075 *new_instr = *instr;
1076 new_instr->regs = regs;
1077
1078 insert_instr(instr->block, new_instr);
1079
1080 /* clone registers: */
1081 new_instr->regs_count = 0;
1082 for (i = 0; i < instr->regs_count; i++) {
1083 struct ir3_register *reg = instr->regs[i];
1084 struct ir3_register *new_reg =
1085 ir3_reg_create(new_instr, reg->num, reg->flags);
1086 *new_reg = *reg;
1087 }
1088
1089 return new_instr;
1090 }
1091
1092 /* Add a false dependency to instr, to ensure that 'dep' gets scheduled before it: */
1093 void ir3_instr_add_dep(struct ir3_instruction *instr, struct ir3_instruction *dep)
1094 {
1095 array_insert(instr, instr->deps, dep);
1096 }
1097
1098 struct ir3_register * ir3_reg_create(struct ir3_instruction *instr,
1099 int num, int flags)
1100 {
1101 struct ir3 *shader = instr->block->shader;
1102 struct ir3_register *reg = reg_create(shader, num, flags);
1103 #ifdef DEBUG
1104 debug_assert(instr->regs_count < instr->regs_max);
1105 #endif
1106 instr->regs[instr->regs_count++] = reg;
1107 return reg;
1108 }
1109
1110 struct ir3_register * ir3_reg_clone(struct ir3 *shader,
1111 struct ir3_register *reg)
1112 {
1113 struct ir3_register *new_reg = reg_create(shader, 0, 0);
1114 *new_reg = *reg;
1115 return new_reg;
1116 }
1117
1118 void
1119 ir3_instr_set_address(struct ir3_instruction *instr,
1120 struct ir3_instruction *addr)
1121 {
1122 if (instr->address != addr) {
1123 struct ir3 *ir = instr->block->shader;
1124
1125 debug_assert(!instr->address);
1126 debug_assert(instr->block == addr->block);
1127
1128 instr->address = addr;
1129 debug_assert(reg_num(addr->regs[0]) == REG_A0);
1130 unsigned comp = reg_comp(addr->regs[0]);
1131 if (comp == 0) {
1132 array_insert(ir, ir->a0_users, instr);
1133 } else {
1134 debug_assert(comp == 1);
1135 array_insert(ir, ir->a1_users, instr);
1136 }
1137 }
1138 }
1139
1140 void
1141 ir3_block_clear_mark(struct ir3_block *block)
1142 {
1143 foreach_instr (instr, &block->instr_list)
1144 instr->flags &= ~IR3_INSTR_MARK;
1145 }
1146
1147 void
1148 ir3_clear_mark(struct ir3 *ir)
1149 {
1150 foreach_block (block, &ir->block_list) {
1151 ir3_block_clear_mark(block);
1152 }
1153 }
1154
1155 unsigned
1156 ir3_count_instructions(struct ir3 *ir)
1157 {
1158 unsigned cnt = 1;
1159 foreach_block (block, &ir->block_list) {
1160 block->start_ip = cnt;
1161 foreach_instr (instr, &block->instr_list) {
1162 instr->ip = cnt++;
1163 }
1164 block->end_ip = cnt;
1165 }
1166 return cnt;
1167 }
1168
1169 /* When counting instructions for RA, we insert extra fake instructions at the
1170 * beginning of each block, where values become live, and at the end where
1171 * values die. This prevents problems where values live-in at the beginning or
1172 * live-out at the end of a block from being treated as if they were
1173 * live-in/live-out at the first/last instruction, which would be incorrect.
1174 * In ir3_legalize these ip's are assumed to be actual ip's of the final
1175 * program, so it would be incorrect to use this everywhere.
1176 */
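/* (e.g. for a single block with two instructions, ir3_count_instructions()
 * gives start_ip=1, ips 1..2, end_ip=3, while the _ra variant gives
 * start_ip=1, ips 2..3, end_ip=4)
 */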
1177
1178 unsigned
1179 ir3_count_instructions_ra(struct ir3 *ir)
1180 {
1181 unsigned cnt = 1;
1182 foreach_block (block, &ir->block_list) {
1183 block->start_ip = cnt++;
1184 foreach_instr (instr, &block->instr_list) {
1185 instr->ip = cnt++;
1186 }
1187 block->end_ip = cnt++;
1188 }
1189 return cnt;
1190 }
1191
1192 struct ir3_array *
1193 ir3_lookup_array(struct ir3 *ir, unsigned id)
1194 {
1195 foreach_array (arr, &ir->array_list)
1196 if (arr->id == id)
1197 return arr;
1198 return NULL;
1199 }
1200
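/* Build the per-instruction 'uses' sets, ie. for each instruction the set
 * of instructions consuming its value, optionally also including false
 * (scheduling) dependencies:
 */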
1201 void
1202 ir3_find_ssa_uses(struct ir3 *ir, void *mem_ctx, bool falsedeps)
1203 {
1204 /* We could do this in a single pass if we can assume instructions
1205 * are always sorted. Which currently might not always be true.
1206 * (In particular after ir3_group pass, but maybe other places.)
1207 */
1208 foreach_block (block, &ir->block_list)
1209 foreach_instr (instr, &block->instr_list)
1210 instr->uses = NULL;
1211
1212 foreach_block (block, &ir->block_list) {
1213 foreach_instr (instr, &block->instr_list) {
1214 foreach_ssa_src_n (src, n, instr) {
1215 if (__is_false_dep(instr, n) && !falsedeps)
1216 continue;
1217 if (!src->uses)
1218 src->uses = _mesa_pointer_set_create(mem_ctx);
1219 _mesa_set_add(src->uses, instr);
1220 }
1221 }
1222 }
1223 }
1224
1225 /**
1226 * Set the destination type of an instruction, for example if a
1227 * conversion is folded in, handling the special cases where the
1228 * instruction's dest type or opcode needs to be fixed up.
1229 */
1230 void
1231 ir3_set_dst_type(struct ir3_instruction *instr, bool half)
1232 {
1233 if (half) {
1234 instr->regs[0]->flags |= IR3_REG_HALF;
1235 } else {
1236 instr->regs[0]->flags &= ~IR3_REG_HALF;
1237 }
1238
1239 switch (opc_cat(instr->opc)) {
1240 case 1: /* move instructions */
1241 if (half) {
1242 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
1243 } else {
1244 instr->cat1.dst_type = full_type(instr->cat1.dst_type);
1245 }
1246 break;
1247 case 4:
1248 if (half) {
1249 instr->opc = cat4_half_opc(instr->opc);
1250 } else {
1251 instr->opc = cat4_full_opc(instr->opc);
1252 }
1253 break;
1254 case 5:
1255 if (half) {
1256 instr->cat5.type = half_type(instr->cat5.type);
1257 } else {
1258 instr->cat5.type = full_type(instr->cat5.type);
1259 }
1260 break;
1261 }
1262 }
1263
1264 /**
1265 * One-time fixup for instruction src-types. Other than cov's that
1266 * are folded, an instruction's src type does not change.
1267 */
1268 void
1269 ir3_fixup_src_type(struct ir3_instruction *instr)
1270 {
1271 bool half = !!(instr->regs[1]->flags & IR3_REG_HALF);
1272
1273 switch (opc_cat(instr->opc)) {
1274 case 1: /* move instructions */
1275 if (half) {
1276 instr->cat1.src_type = half_type(instr->cat1.src_type);
1277 } else {
1278 instr->cat1.src_type = full_type(instr->cat1.src_type);
1279 }
1280 break;
1281 case 3:
1282 if (half) {
1283 instr->opc = cat3_half_opc(instr->opc);
1284 } else {
1285 instr->opc = cat3_full_opc(instr->opc);
1286 }
1287 break;
1288 }
1289 }
1290
1291 static unsigned
1292 cp_flags(unsigned flags)
1293 {
1294 /* only considering these flags (at least for now): */
1295 flags &= (IR3_REG_CONST | IR3_REG_IMMED |
1296 IR3_REG_FNEG | IR3_REG_FABS |
1297 IR3_REG_SNEG | IR3_REG_SABS |
1298 IR3_REG_BNOT | IR3_REG_RELATIV);
1299 return flags;
1300 }
1301
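/* Check whether a src with the given flags would be legal in src slot 'n'
 * of 'instr', ie. whether (abs)/(neg), const, immed, relative, etc, can be
 * folded in to that src (used by copy-propagation and similar passes):
 */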
1302 bool
1303 ir3_valid_flags(struct ir3_instruction *instr, unsigned n,
1304 unsigned flags)
1305 {
1306 struct ir3_compiler *compiler = instr->block->shader->compiler;
1307 unsigned valid_flags;
1308
1309 if ((flags & IR3_REG_HIGH) &&
1310 (opc_cat(instr->opc) > 1) &&
1311 (compiler->gpu_id >= 600))
1312 return false;
1313
1314 flags = cp_flags(flags);
1315
1316 /* If destination is indirect, then source cannot be.. at least
1317 * I don't think so..
1318 */
1319 if ((instr->regs[0]->flags & IR3_REG_RELATIV) &&
1320 (flags & IR3_REG_RELATIV))
1321 return false;
1322
1323 if (flags & IR3_REG_RELATIV) {
1324 /* TODO need to test on earlier gens.. pretty sure the earlier
1325 * problem was just that we didn't check that the src was from
1326 * same block (since we can't propagate address register values
1327 * across blocks currently)
1328 */
1329 if (compiler->gpu_id < 600)
1330 return false;
1331
1332 /* NOTE in the special try_swap_mad_two_srcs() case we can be
1333 * called on a src that has already had an indirect load folded
1334 * in, in which case ssa() returns NULL
1335 */
1336 if (instr->regs[n+1]->flags & IR3_REG_SSA) {
1337 struct ir3_instruction *src = ssa(instr->regs[n+1]);
1338 if (src->address->block != instr->block)
1339 return false;
1340 }
1341 }
1342
1343 switch (opc_cat(instr->opc)) {
1344 case 1:
1345 valid_flags = IR3_REG_IMMED | IR3_REG_CONST | IR3_REG_RELATIV;
1346 if (flags & ~valid_flags)
1347 return false;
1348 break;
1349 case 2:
1350 valid_flags = ir3_cat2_absneg(instr->opc) |
1351 IR3_REG_CONST | IR3_REG_RELATIV;
1352
1353 if (ir3_cat2_int(instr->opc))
1354 valid_flags |= IR3_REG_IMMED;
1355
1356 if (flags & ~valid_flags)
1357 return false;
1358
1359 if (flags & (IR3_REG_CONST | IR3_REG_IMMED)) {
1360 unsigned m = (n ^ 1) + 1;
1361 /* cannot deal w/ const in both srcs:
1362 * (note that some cat2 actually only have a single src)
1363 */
1364 if (m < instr->regs_count) {
1365 struct ir3_register *reg = instr->regs[m];
1366 if ((flags & IR3_REG_CONST) && (reg->flags & IR3_REG_CONST))
1367 return false;
1368 if ((flags & IR3_REG_IMMED) && (reg->flags & IR3_REG_IMMED))
1369 return false;
1370 }
1371 }
1372 break;
1373 case 3:
1374 valid_flags = ir3_cat3_absneg(instr->opc) |
1375 IR3_REG_CONST | IR3_REG_RELATIV;
1376
1377 if (flags & ~valid_flags)
1378 return false;
1379
1380 if (flags & (IR3_REG_CONST | IR3_REG_RELATIV)) {
1381 /* cannot deal w/ const/relativ in 2nd src: */
1382 if (n == 1)
1383 return false;
1384 }
1385
1386 break;
1387 case 4:
1388 /* seems like blob compiler avoids const as src.. */
1389 /* TODO double check if this is still the case on a4xx */
1390 if (flags & (IR3_REG_CONST | IR3_REG_IMMED))
1391 return false;
1392 if (flags & (IR3_REG_SABS | IR3_REG_SNEG))
1393 return false;
1394 break;
1395 case 5:
1396 /* no flags allowed */
1397 if (flags)
1398 return false;
1399 break;
1400 case 6:
1401 valid_flags = IR3_REG_IMMED;
1402 if (flags & ~valid_flags)
1403 return false;
1404
1405 if (flags & IR3_REG_IMMED) {
1406 /* doesn't seem like we can have immediate src for store
1407 * instructions:
1408 *
1409 * TODO this restriction could also apply to load instructions,
1410 * but for load instructions this arg is the address (and not
1411 * really sure any good way to test a hard-coded immed addr src)
1412 */
1413 if (is_store(instr) && (n == 1))
1414 return false;
1415
1416 if ((instr->opc == OPC_LDL) && (n == 0))
1417 return false;
1418
1419 if ((instr->opc == OPC_STL) && (n != 2))
1420 return false;
1421
1422 if (instr->opc == OPC_STLW && n == 0)
1423 return false;
1424
1425 if (instr->opc == OPC_LDLW && n == 0)
1426 return false;
1427
1428 /* disallow immediates in anything but the SSBO slot argument for
1429 * cat6 instructions:
1430 */
1431 if (is_atomic(instr->opc) && (n != 0))
1432 return false;
1433
1434 if (is_atomic(instr->opc) && !(instr->flags & IR3_INSTR_G))
1435 return false;
1436
1437 if (instr->opc == OPC_STG && (instr->flags & IR3_INSTR_G) && (n != 2))
1438 return false;
1439
1440 /* as with atomics, these cat6 instrs can only have an immediate
1441 * for SSBO/IBO slot argument
1442 */
1443 switch (instr->opc) {
1444 case OPC_LDIB:
1445 case OPC_LDC:
1446 case OPC_RESINFO:
1447 if (n != 0)
1448 return false;
1449 break;
1450 default:
1451 break;
1452 }
1453 }
1454
1455 break;
1456 }
1457
1458 return true;
1459 }