freedreno/ir3: Leave bools as 1-bit, storing them in full regs.
src/freedreno/ir3/ir3_context.c
/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

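/* Set up the per-variant compile context: clone the variant's NIR, run the
 * final lowering/cleanup passes that must happen right before emit, and
 * create the hash tables used to map NIR defs/blocks to ir3 instructions.
 */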
struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
      struct ir3_shader_variant *so)
{
   struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

   if (compiler->gpu_id >= 400) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->astc_srgb = so->key.vastc_srgb;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->astc_srgb = so->key.fastc_srgb;
      }

   } else {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->samples = so->key.vsamples;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->samples = so->key.fsamples;
      }
   }

   if (compiler->gpu_id >= 600) {
      ctx->funcs = &ir3_a6xx_funcs;
   } else if (compiler->gpu_id >= 400) {
      ctx->funcs = &ir3_a4xx_funcs;
   }

   ctx->compiler = compiler;
   ctx->so = so;
   ctx->def_ht = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->block_ht = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->sel_cond_conversions = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);

   /* TODO: maybe generate some sort of bitmask of what the key
    * lowers vs what the shader has (ie. no need to do texture clamp
    * lowering if there are no texture sample instrs)..  although this
    * should be done further up the stack to avoid creating duplicate
    * variants..
    */

   ctx->s = nir_shader_clone(ctx, so->shader->nir);
   if (ir3_key_lowers_nir(&so->key))
      ir3_optimize_nir(so->shader, ctx->s, &so->key);

   /* this needs to be the last pass run, so do this here instead of
    * in ir3_optimize_nir():
    */
   bool progress = false;
   NIR_PASS(progress, ctx->s, nir_lower_locals_to_regs);

   /* we may need cleanup after nir_lower_locals_to_regs: */
   while (progress) {
      progress = false;
      NIR_PASS(progress, ctx->s, nir_opt_algebraic);
      NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
   }

   /* We want to lower nir_op_imul as late as possible, to also catch
    * those generated by earlier passes (e.g. nir_lower_locals_to_regs).
    * However, we want a final swing of a few passes to have a chance
    * at optimizing the result.
    */
   progress = false;
   NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
   while (progress) {
      progress = false;
      NIR_PASS(progress, ctx->s, nir_opt_algebraic);
      NIR_PASS(progress, ctx->s, nir_opt_copy_prop_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dead_write_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dce);
      NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
   }

   /* The texture pre-fetch feature is only available on a4xx onwards,
    * but only enable it on generations that have been tested:
    */
   if ((so->type == MESA_SHADER_FRAGMENT) && (compiler->gpu_id >= 600))
      NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

   NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

   if (shader_debug_enabled(so->type)) {
      fprintf(stdout, "NIR (final form) for %s shader %s:\n",
            ir3_shader_stage(so), so->shader->nir->info.name);
      nir_print_shader(ctx->s, stdout);
   }

   ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

   return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
   ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n-element value array (to be populated by the caller) and
 * insert it in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
   struct ir3_instruction **value =
         ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
   _mesa_hash_table_insert(ctx->def_ht, dst, value);
   return value;
}

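/* like ir3_get_dst_ssa(), but also handles the non-ssa (nir_register)
 * case, and records last_dst so the matching ir3_put_dst() can apply
 * half-reg fixups and array stores:
 */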
struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
   struct ir3_instruction **value;

   if (dst->is_ssa) {
      value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
   } else {
      value = ralloc_array(ctx, struct ir3_instruction *, n);
   }

   /* NOTE: in the non-ssa case we don't really need to store last_dst,
    * but it helps us catch cases where a put_dst() call is forgotten:
    */
   compile_assert(ctx, !ctx->last_dst);
   ctx->last_dst = value;
   ctx->last_dst_n = n;

   return value;
}

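/* get the per-component ir3 values for a nir_src.  For ssa srcs this is
 * just a def_ht lookup; for nir_register srcs we emit (possibly relative)
 * loads from the backing ir3 array:
 */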
struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
   if (src->is_ssa) {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
      compile_assert(ctx, entry);
      return entry->data;
   } else {
      nir_register *reg = src->reg.reg;
      struct ir3_array *arr = ir3_get_array(ctx, reg);
      unsigned num_components = arr->r->num_components;
      struct ir3_instruction *addr = NULL;
      struct ir3_instruction **value =
            ralloc_array(ctx, struct ir3_instruction *, num_components);

      if (src->reg.indirect)
         addr = ir3_get_addr0(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
               reg->num_components);

      for (unsigned i = 0; i < num_components; i++) {
         unsigned n = src->reg.base_offset * reg->num_components + i;
         compile_assert(ctx, n < arr->length);
         value[i] = ir3_create_array_load(ctx, arr, n, addr, reg->bit_size);
      }

      return value;
   }
}

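/* finish up a dst started with ir3_get_dst(): propagate HIGH/HALF flags
 * to the written values, and for non-ssa dsts emit the array stores:
 */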
void
ir3_put_dst(struct ir3_context *ctx, nir_dest *dst)
{
   unsigned bit_size = nir_dest_bit_size(*dst);

   /* add an extra mov if the dst value is a HIGH reg: not all
    * instructions can read from HIGH regs, and in the cases where they
    * can, ir3_cp will clean up the extra mov:
    */
   for (unsigned i = 0; i < ctx->last_dst_n; i++) {
      if (!ctx->last_dst[i])
         continue;
      if (ctx->last_dst[i]->regs[0]->flags & IR3_REG_HIGH) {
         ctx->last_dst[i] = ir3_MOV(ctx->block, ctx->last_dst[i], TYPE_U32);
      }
   }

   /* Note: 1-bit bools are stored in 32-bit regs */
   if (bit_size == 16) {
      for (unsigned i = 0; i < ctx->last_dst_n; i++) {
         struct ir3_instruction *dst = ctx->last_dst[i];
         dst->regs[0]->flags |= IR3_REG_HALF;
         if (ctx->last_dst[i]->opc == OPC_META_SPLIT)
            dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
      }
   }

   if (!dst->is_ssa) {
      nir_register *reg = dst->reg.reg;
      struct ir3_array *arr = ir3_get_array(ctx, reg);
      unsigned num_components = ctx->last_dst_n;
      struct ir3_instruction *addr = NULL;

      if (dst->reg.indirect)
         addr = ir3_get_addr0(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
               reg->num_components);

      for (unsigned i = 0; i < num_components; i++) {
         unsigned n = dst->reg.base_offset * reg->num_components + i;
         compile_assert(ctx, n < arr->length);
         if (!ctx->last_dst[i])
            continue;
         ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
      }

      ralloc_free(ctx->last_dst);
   }

   ctx->last_dst = NULL;
   ctx->last_dst_n = 0;
}

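/* half/high flags of an instruction's dst reg, used to propagate the
 * same precision onto the collect/split meta instructions below:
 */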
static unsigned
dest_flags(struct ir3_instruction *instr)
{
   return instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH);
}

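/* create a collect meta instruction grouping 'arrsz' scalar values, so
 * that RA will assign them consecutive registers:
 */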
struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
      unsigned arrsz)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *collect;

   if (arrsz == 0)
      return NULL;

   unsigned flags = dest_flags(arr[0]);

   collect = ir3_instr_create2(block, OPC_META_COLLECT, 1 + arrsz);
   __ssa_dst(collect)->flags |= flags;
   for (unsigned i = 0; i < arrsz; i++) {
      struct ir3_instruction *elem = arr[i];

      /* Since arrays are pre-colored in RA, we can't assume that
       * things will end up in the right place.  (Ie. if a collect
       * joins elements from two different arrays.)  So insert an
       * extra mov.
       *
       * We could possibly skip this if all the collected elements
       * are contiguous elements in a single array.. not sure how
       * likely that is to happen.
       *
       * Fixes a problem with glamor shaders, that in effect do
       * something like:
       *
       *   if (foo)
       *     texcoord = ..
       *   else
       *     texcoord = ..
       *   color = texture2D(tex, texcoord);
       *
       * In this case, texcoord will end up as nir registers (which
       * translate to ir3 arrays of length 1), and we can't assume
       * the two (or more) arrays will get allocated in consecutive
       * scalar registers.
       */
      if (elem->regs[0]->flags & IR3_REG_ARRAY) {
         type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
         elem = ir3_MOV(block, elem, type);
      }

      compile_assert(ctx, dest_flags(elem) == flags);
      __ssa_src(collect, elem, flags);
   }

   collect->regs[0]->wrmask = MASK(arrsz);

   return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split meta instruction inserted
 */
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
      struct ir3_instruction *src, unsigned base, unsigned n)
{
   struct ir3_instruction *prev = NULL;

   if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
      dst[0] = src;
      return;
   }

   if (src->opc == OPC_META_COLLECT) {
      debug_assert((base + n) < src->regs_count);

      for (int i = 0; i < n; i++) {
         dst[i] = ssa(src->regs[i + base + 1]);
      }

      return;
   }

   unsigned flags = dest_flags(src);

   for (int i = 0, j = 0; i < n; i++) {
      struct ir3_instruction *split =
            ir3_instr_create(block, OPC_META_SPLIT);
      __ssa_dst(split)->flags |= flags;
      __ssa_src(split, src, flags);
      split->split.off = i + base;

      if (prev) {
         split->cp.left = prev;
         split->cp.left_cnt++;
         prev->cp.right = split;
         prev->cp.right_cnt++;
      }
      prev = split;

      if (src->regs[0]->wrmask & (1 << (i + base)))
         dst[j++] = split;
   }
}

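/* report a compile error; if we know the instruction currently being
 * translated, annotate the NIR print with the error message:
 */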
NORETURN void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
   struct hash_table *errors = NULL;
   va_list ap;
   va_start(ap, format);
   if (ctx->cur_instr) {
      errors = _mesa_hash_table_create(NULL,
            _mesa_hash_pointer,
            _mesa_key_pointer_equal);
      char *msg = ralloc_vasprintf(errors, format, ap);
      _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
   } else {
      _debug_vprintf(format, ap);
   }
   va_end(ap);
   nir_print_shader_annotated(ctx->s, stdout, errors);
   ralloc_free(errors);
   ctx->error = true;
   unreachable("");
}

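/* convert an index value into a write to the address register a0.x,
 * scaling the index by 'align' (components per array element):
 */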
static struct ir3_instruction *
create_addr0(struct ir3_block *block, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *instr, *immed;

   /* TODO in at least some cases, the backend could probably be
    * made clever enough to propagate IR3_REG_HALF..
    */
   instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
   instr->regs[0]->flags |= IR3_REG_HALF;

   switch (align) {
   case 1:
      /* src *= 1: */
      break;
   case 2:
      /* src *= 2 => src <<= 1: */
      immed = create_immed(block, 1);
      immed->regs[0]->flags |= IR3_REG_HALF;

      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      instr->regs[0]->flags |= IR3_REG_HALF;
      instr->regs[1]->flags |= IR3_REG_HALF;
      break;
   case 3:
      /* src *= 3: */
      immed = create_immed(block, 3);
      immed->regs[0]->flags |= IR3_REG_HALF;

      instr = ir3_MULL_U(block, instr, 0, immed, 0);
      instr->regs[0]->flags |= IR3_REG_HALF;
      instr->regs[1]->flags |= IR3_REG_HALF;
      break;
   case 4:
      /* src *= 4 => src <<= 2: */
      immed = create_immed(block, 2);
      immed->regs[0]->flags |= IR3_REG_HALF;

      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      instr->regs[0]->flags |= IR3_REG_HALF;
      instr->regs[1]->flags |= IR3_REG_HALF;
      break;
   default:
      unreachable("bad align");
      return NULL;
   }

   instr = ir3_MOV(block, instr, TYPE_S16);
   instr->regs[0]->num = regid(REG_A0, 0);
   instr->regs[0]->flags &= ~IR3_REG_SSA;
   instr->regs[0]->flags |= IR3_REG_HALF;
   instr->regs[1]->flags |= IR3_REG_HALF;

   return instr;
}

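/* like create_addr0(), but moves a constant value into a1.x: */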
static struct ir3_instruction *
create_addr1(struct ir3_block *block, unsigned const_val)
{
   struct ir3_instruction *immed = create_immed(block, const_val);
   struct ir3_instruction *instr = ir3_MOV(block, immed, TYPE_S16);
   instr->regs[0]->num = regid(REG_A0, 1);
   instr->regs[0]->flags &= ~IR3_REG_SSA;
   instr->regs[0]->flags |= IR3_REG_HALF;
   instr->regs[1]->flags |= IR3_REG_HALF;
   return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR level src as address
 */
struct ir3_instruction *
ir3_get_addr0(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *addr;
   unsigned idx = align - 1;

   compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr0_ht));

   if (!ctx->addr0_ht[idx]) {
      ctx->addr0_ht[idx] = _mesa_hash_table_create(ctx,
            _mesa_hash_pointer, _mesa_key_pointer_equal);
   } else {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->addr0_ht[idx], src);
      if (entry)
         return entry->data;
   }

   addr = create_addr0(ctx->block, src, align);
   _mesa_hash_table_insert(ctx->addr0_ht[idx], src, addr);

   return addr;
}

/* Similar to ir3_get_addr0, but for a1.x. */
struct ir3_instruction *
ir3_get_addr1(struct ir3_context *ctx, unsigned const_val)
{
   struct ir3_instruction *addr;

   if (!ctx->addr1_ht) {
      ctx->addr1_ht = _mesa_hash_table_u64_create(ctx);
   } else {
      addr = _mesa_hash_table_u64_search(ctx->addr1_ht, const_val);
      if (addr)
         return addr;
   }

   addr = create_addr1(ctx->block, const_val);
   _mesa_hash_table_u64_insert(ctx->addr1_ht, const_val, addr);

   return addr;
}

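/* get a predicate register (p0.x) version of a boolean value, by
 * comparing it against zero (any non-zero value counts as true):
 */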
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *cond;

   /* NOTE: only cmps.*.* can write p0.x: */
   cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
   cond->cat2.condition = IR3_COND_NE;

   /* condition always goes in predicate register: */
   cond->regs[0]->num = regid(REG_P0, 0);
   cond->regs[0]->flags &= ~IR3_REG_SSA;

   return cond;
}

/*
 * Array helpers
 */

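/* create the ir3_array backing a nir_register; non-SSA nir registers
 * are handled as (relatively addressable) ir3 arrays:
 */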
void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
   struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
   arr->id = ++ctx->num_arrays;
   /* NOTE: sometimes we get non-array regs, for example for arrays of
    * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
    * treat a non-array as if it was an array of length 1.
    *
    * It would be nice if there was a nir pass to convert arrays of
    * length 1 to ssa.
    */
   arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
   compile_assert(ctx, arr->length > 0);
   arr->r = reg;
   list_addtail(&arr->node, &ctx->ir->array_list);
}

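/* look up the ir3_array that was declared for a given nir_register: */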
struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
   foreach_array (arr, &ctx->ir->array_list) {
      if (arr->r == reg)
         return arr;
   }
   ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
   return NULL;
}

/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
      struct ir3_instruction *address, unsigned bitsize)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *src;
   unsigned flags = 0;

   mov = ir3_instr_create(block, OPC_MOV);
   if (bitsize == 16) {
      mov->cat1.src_type = TYPE_U16;
      mov->cat1.dst_type = TYPE_U16;
      flags |= IR3_REG_HALF;
      arr->half = true;
   } else {
      mov->cat1.src_type = TYPE_U32;
      mov->cat1.dst_type = TYPE_U32;
   }

   mov->barrier_class = IR3_BARRIER_ARRAY_R;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
   __ssa_dst(mov)->flags |= flags;
   src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
         COND(address, IR3_REG_RELATIV) | flags);
   src->instr = arr->last_write;
   src->size = arr->length;
   src->array.id = arr->id;
   src->array.offset = n;

   if (address)
      ir3_instr_set_address(mov, address);

   return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
      struct ir3_instruction *src, struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *dst;

   /* if not relative store, don't create an extra mov, since that
    * ends up being difficult for cp to remove.
    *
    * Also, don't skip the mov if the src is meta (like fanout/split),
    * since that creates a situation that RA can't really handle properly.
    */
   if (!address && !is_meta(src)) {
      dst = src->regs[0];

      src->barrier_class |= IR3_BARRIER_ARRAY_W;
      src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

      dst->flags |= IR3_REG_ARRAY;
      dst->instr = arr->last_write;
      dst->size = arr->length;
      dst->array.id = arr->id;
      dst->array.offset = n;

      arr->last_write = src;

      array_insert(block, block->keeps, src);

      return;
   }

   mov = ir3_instr_create(block, OPC_MOV);
   mov->cat1.src_type = TYPE_U32;
   mov->cat1.dst_type = TYPE_U32;
   mov->barrier_class = IR3_BARRIER_ARRAY_W;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
   dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
         COND(address, IR3_REG_RELATIV));
   dst->instr = arr->last_write;
   dst->size = arr->length;
   dst->array.id = arr->id;
   dst->array.offset = n;
   ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

   if (address)
      ir3_instr_set_address(mov, address);

   arr->last_write = mov;

   /* the array store may only matter to something in an earlier
    * block (ie. loops), but since arrays are not in SSA, the depth
    * pass won't know this.. so keep all array stores:
    */
   array_insert(block, block->keeps, mov);
}