ir3: Plumb through bindless support
[mesa.git] / src / freedreno / ir3 / ir3_context.c
/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"
struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so)
{
	struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

	if (compiler->gpu_id >= 400) {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->astc_srgb = so->key.vastc_srgb;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->astc_srgb = so->key.fastc_srgb;
		}

	} else {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->samples = so->key.vsamples;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->samples = so->key.fsamples;
		}
	}

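	/* select the per-generation backend callback table (instruction
	 * emission for things like SSBO/image access differs between the
	 * a4xx and a6xx families):
	 */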
	if (compiler->gpu_id >= 600) {
		ctx->funcs = &ir3_a6xx_funcs;
	} else if (compiler->gpu_id >= 400) {
		ctx->funcs = &ir3_a4xx_funcs;
	}

	ctx->compiler = compiler;
	ctx->so = so;
	ctx->def_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);
	ctx->block_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);

	/* TODO: maybe generate some sort of bitmask of what the key lowers
	 * vs what the shader actually contains (ie. no need to run texture
	 * clamp lowering if there are no texture sample instrs).. although
	 * that should be done further up the stack to avoid creating
	 * duplicate variants..
	 */

	ctx->s = nir_shader_clone(ctx, so->shader->nir);
	if (ir3_key_lowers_nir(&so->key))
		ir3_optimize_nir(so->shader, ctx->s, &so->key);

	/* this needs to be the last pass run, so do this here instead of
	 * in ir3_optimize_nir():
	 */
	NIR_PASS_V(ctx->s, nir_lower_bool_to_bitsize);
	bool progress = false;
	NIR_PASS(progress, ctx->s, nir_lower_locals_to_regs);

	/* we may need cleanup after nir_lower_locals_to_regs: */
	while (progress) {
		progress = false;
		NIR_PASS(progress, ctx->s, nir_opt_algebraic);
		NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
	}

	/* We want to lower nir_op_imul as late as possible, to also catch
	 * those generated by earlier passes (e.g., nir_lower_locals_to_regs).
	 * However, we want a final swing of a few passes to have a chance
	 * at optimizing the result.
	 */
	progress = false;
	NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
	while (progress) {
		progress = false;
		NIR_PASS(progress, ctx->s, nir_opt_algebraic);
		NIR_PASS(progress, ctx->s, nir_opt_copy_prop_vars);
		NIR_PASS(progress, ctx->s, nir_opt_dead_write_vars);
		NIR_PASS(progress, ctx->s, nir_opt_dce);
		NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
	}

	/* Enable the texture pre-fetch feature only on a6xx onwards.  But
	 * only enable it on generations that have been tested:
	 */
	if ((so->type == MESA_SHADER_FRAGMENT) && (compiler->gpu_id >= 600))
		NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

	NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

	if (shader_debug_enabled(so->type)) {
		fprintf(stdout, "NIR (final form) for %s shader %s:\n",
				ir3_shader_stage(so), so->shader->nir->info.name);
		nir_print_shader(ctx->s, stdout);
	}

	ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

	return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
	ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n-element value array (to be populated by caller) and
 * insert it in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
	struct ir3_instruction **value =
		ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
	_mesa_hash_table_insert(ctx->def_ht, dst, value);
	return value;
}

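/* like ir3_get_dst_ssa(), but also handles the non-ssa (nir_register)
 * case; each call must be paired with a later ir3_put_dst():
 */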
struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
	struct ir3_instruction **value;

	if (dst->is_ssa) {
		value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
	} else {
		value = ralloc_array(ctx, struct ir3_instruction *, n);
	}

	/* NOTE: in non-ssa case, we don't really need to store last_dst
	 * but this helps us catch cases where a put_dst() call is forgotten
	 */
	compile_assert(ctx, !ctx->last_dst);
	ctx->last_dst = value;
	ctx->last_dst_n = n;

	return value;
}

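/* fetch the ir3 values previously emitted for a nir_src; for the non-ssa
 * (nir_register) case this emits the array load(s), relative if the src
 * has an indirect:
 */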
struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
	if (src->is_ssa) {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
		compile_assert(ctx, entry);
		return entry->data;
	} else {
		nir_register *reg = src->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = arr->r->num_components;
		struct ir3_instruction *addr = NULL;
		struct ir3_instruction **value =
			ralloc_array(ctx, struct ir3_instruction *, num_components);

		if (src->reg.indirect)
			addr = ir3_get_addr0(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = src->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			value[i] = ir3_create_array_load(ctx, arr, n, addr, reg->bit_size);
		}

		return value;
	}
}

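/* finalize the dst values stashed by the matching ir3_get_dst(): apply
 * half/high register fixups and, for the non-ssa (nir_register) case,
 * emit the array store(s):
 */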
void
ir3_put_dst(struct ir3_context *ctx, nir_dest *dst)
{
	unsigned bit_size = nir_dest_bit_size(*dst);

	/* add an extra mov if the dst value is a HIGH reg.. not all
	 * instructions can read from HIGH regs, and in the cases where
	 * they can, ir3_cp will clean up the extra mov:
	 */
	for (unsigned i = 0; i < ctx->last_dst_n; i++) {
		if (!ctx->last_dst[i])
			continue;
		if (ctx->last_dst[i]->regs[0]->flags & IR3_REG_HIGH) {
			ctx->last_dst[i] = ir3_MOV(ctx->block, ctx->last_dst[i], TYPE_U32);
		}
	}

	if (bit_size < 32) {
		for (unsigned i = 0; i < ctx->last_dst_n; i++) {
			struct ir3_instruction *dst = ctx->last_dst[i];
			dst->regs[0]->flags |= IR3_REG_HALF;
			if (ctx->last_dst[i]->opc == OPC_META_SPLIT)
				dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
		}
	}

	if (!dst->is_ssa) {
		nir_register *reg = dst->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = ctx->last_dst_n;
		struct ir3_instruction *addr = NULL;

		if (dst->reg.indirect)
			addr = ir3_get_addr0(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = dst->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			if (!ctx->last_dst[i])
				continue;
			ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
		}

		ralloc_free(ctx->last_dst);
	}

	ctx->last_dst = NULL;
	ctx->last_dst_n = 0;
}

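/* the half/high flags of an instruction's dst register, which need to be
 * matched by anything collected from or split out of it:
 */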
static unsigned
dest_flags(struct ir3_instruction *instr)
{
	return instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH);
}

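/* gather 'arrsz' scalar components into a single "vector" value via a
 * collect meta instruction, so they end up in consecutive scalar
 * registers after RA:
 */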
struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
		unsigned arrsz)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *collect;

	if (arrsz == 0)
		return NULL;

	unsigned flags = dest_flags(arr[0]);

	collect = ir3_instr_create2(block, OPC_META_COLLECT, 1 + arrsz);
	__ssa_dst(collect)->flags |= flags;
	for (unsigned i = 0; i < arrsz; i++) {
		struct ir3_instruction *elem = arr[i];

		/* Since arrays are pre-colored in RA, we can't assume that
		 * things will end up in the right place. (Ie. if a collect
		 * joins elements from two different arrays.)  So insert an
		 * extra mov.
		 *
		 * We could possibly skip this if all the collected elements
		 * are contiguous elements in a single array.. not sure how
		 * likely that is to happen.
		 *
		 * Fixes a problem with glamor shaders, which in effect do
		 * something like:
		 *
		 *   if (foo)
		 *     texcoord = ..
		 *   else
		 *     texcoord = ..
		 *   color = texture2D(tex, texcoord);
		 *
		 * In this case, texcoord will end up as nir registers (which
		 * translate to ir3 arrays of length 1), and we can't assume
		 * the two (or more) arrays will get allocated in consecutive
		 * scalar registers.
		 */
		if (elem->regs[0]->flags & IR3_REG_ARRAY) {
			type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
			elem = ir3_MOV(block, elem, type);
		}

		compile_assert(ctx, dest_flags(elem) == flags);
		__ssa_src(collect, elem, flags);
	}

	collect->regs[0]->wrmask = MASK(arrsz);

	return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split meta instruction inserted
 */
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n)
{
	struct ir3_instruction *prev = NULL;

	if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
		dst[0] = src;
		return;
	}

	if (src->opc == OPC_META_COLLECT) {
		debug_assert((base + n) < src->regs_count);

		for (int i = 0; i < n; i++) {
			dst[i] = ssa(src->regs[i + base + 1]);
		}

		return;
	}

	unsigned flags = dest_flags(src);

	for (int i = 0, j = 0; i < n; i++) {
		struct ir3_instruction *split =
				ir3_instr_create(block, OPC_META_SPLIT);
		__ssa_dst(split)->flags |= flags;
		__ssa_src(split, src, flags);
		split->split.off = i + base;

		if (prev) {
			split->cp.left = prev;
			split->cp.left_cnt++;
			prev->cp.right = split;
			prev->cp.right_cnt++;
		}
		prev = split;

		if (src->regs[0]->wrmask & (1 << (i + base)))
			dst[j++] = split;
	}
}

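/* report a compile error; if we know which instruction we were emitting,
 * print the NIR annotated with the error message at that instruction:
 */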
NORETURN void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
	struct hash_table *errors = NULL;
	va_list ap;
	va_start(ap, format);
	if (ctx->cur_instr) {
		errors = _mesa_hash_table_create(NULL,
				_mesa_hash_pointer,
				_mesa_key_pointer_equal);
		char *msg = ralloc_vasprintf(errors, format, ap);
		_mesa_hash_table_insert(errors, ctx->cur_instr, msg);
	} else {
		_debug_vprintf(format, ap);
	}
	va_end(ap);
	nir_print_shader_annotated(ctx->s, stdout, errors);
	ralloc_free(errors);
	ctx->error = true;
	unreachable("");
}

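/* build the cov/shl/mull/mov sequence that writes an address value into
 * a0.x, scaling the src by 'align':
 */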
static struct ir3_instruction *
create_addr0(struct ir3_block *block, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *instr, *immed;

	/* TODO in at least some cases, the backend could probably be
	 * made clever enough to propagate IR3_REG_HALF..
	 */
	instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
	instr->regs[0]->flags |= IR3_REG_HALF;

	switch (align) {
	case 1:
		/* src *= 1: */
		break;
	case 2:
		/* src *= 2 => src <<= 1: */
		immed = create_immed(block, 1);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 3:
		/* src *= 3: */
		immed = create_immed(block, 3);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_MULL_U(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 4:
		/* src *= 4 => src <<= 2: */
		immed = create_immed(block, 2);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	default:
		unreachable("bad align");
		return NULL;
	}

	instr = ir3_MOV(block, instr, TYPE_S16);
	instr->regs[0]->num = regid(REG_A0, 0);
	instr->regs[0]->flags &= ~IR3_REG_SSA;
	instr->regs[0]->flags |= IR3_REG_HALF;
	instr->regs[1]->flags |= IR3_REG_HALF;

	return instr;
}

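/* like create_addr0(), but for a1.x, which only ever gets an immediate
 * value here:
 */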
static struct ir3_instruction *
create_addr1(struct ir3_block *block, unsigned const_val)
{
	struct ir3_instruction *immed = create_immed(block, const_val);
	struct ir3_instruction *instr = ir3_MOV(block, immed, TYPE_S16);
	instr->regs[0]->num = regid(REG_A0, 1);
	instr->regs[0]->flags &= ~IR3_REG_SSA;
	instr->regs[0]->flags |= IR3_REG_HALF;
	instr->regs[1]->flags |= IR3_REG_HALF;
	return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR level src as address
 */
struct ir3_instruction *
ir3_get_addr0(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *addr;
	unsigned idx = align - 1;

	compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr0_ht));

	if (!ctx->addr0_ht[idx]) {
		ctx->addr0_ht[idx] = _mesa_hash_table_create(ctx,
				_mesa_hash_pointer, _mesa_key_pointer_equal);
	} else {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->addr0_ht[idx], src);
		if (entry)
			return entry->data;
	}

	addr = create_addr0(ctx->block, src, align);
	_mesa_hash_table_insert(ctx->addr0_ht[idx], src, addr);

	return addr;
}

/* Similar to ir3_get_addr0, but for a1.x. */
struct ir3_instruction *
ir3_get_addr1(struct ir3_context *ctx, unsigned const_val)
{
	struct ir3_instruction *addr;

	if (!ctx->addr1_ht) {
		ctx->addr1_ht = _mesa_hash_table_u64_create(ctx);
	} else {
		addr = _mesa_hash_table_u64_search(ctx->addr1_ht, const_val);
		if (addr)
			return addr;
	}

	addr = create_addr1(ctx->block, const_val);
	_mesa_hash_table_u64_insert(ctx->addr1_ht, const_val, addr);

	return addr;
}

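/* turn a boolean-ish src value into a p0.x predicate write
 * (cmps.s.ne src, 0):
 */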
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *cond;

	/* NOTE: only cmps.*.* can write p0.x: */
	cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
	cond->cat2.condition = IR3_COND_NE;

	/* condition always goes in predicate register: */
	cond->regs[0]->num = regid(REG_P0, 0);
	cond->regs[0]->flags &= ~IR3_REG_SSA;

	return cond;
}

/*
 * Array helpers
 */

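/* create the ir3_array that will back accesses to the given nir_register: */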
void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
	struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
	arr->id = ++ctx->num_arrays;
	/* NOTE: sometimes we get non-array regs, for example for arrays of
	 * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
	 * treat a non-array as if it were an array of length 1.
	 *
	 * It would be nice if there was a nir pass to convert arrays of
	 * length 1 to ssa.
	 */
	arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
	compile_assert(ctx, arr->length > 0);
	arr->r = reg;
	list_addtail(&arr->node, &ctx->ir->array_list);
}

struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
	foreach_array (arr, &ctx->ir->array_list) {
		if (arr->r == reg)
			return arr;
	}
	ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
	return NULL;
}

/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *address, unsigned bitsize)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *src;
	unsigned flags = 0;

	mov = ir3_instr_create(block, OPC_MOV);
	if (bitsize < 32) {
		mov->cat1.src_type = TYPE_U16;
		mov->cat1.dst_type = TYPE_U16;
		flags |= IR3_REG_HALF;
		arr->half = true;
	} else {
		mov->cat1.src_type = TYPE_U32;
		mov->cat1.dst_type = TYPE_U32;
	}

	mov->barrier_class = IR3_BARRIER_ARRAY_R;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
	__ssa_dst(mov)->flags |= flags;
	src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV) | flags);
	src->instr = arr->last_write;
	src->size = arr->length;
	src->array.id = arr->id;
	src->array.offset = n;

	if (address)
		ir3_instr_set_address(mov, address);

	return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *dst;

	/* if not relative store, don't create an extra mov, since that
	 * ends up being difficult for cp to remove.
	 *
	 * Also, don't skip the mov if the src is meta (like fanout/split),
	 * since that creates a situation that RA can't really handle properly.
	 */
	if (!address && !is_meta(src)) {
		dst = src->regs[0];

		src->barrier_class |= IR3_BARRIER_ARRAY_W;
		src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

		dst->flags |= IR3_REG_ARRAY;
		dst->instr = arr->last_write;
		dst->size = arr->length;
		dst->array.id = arr->id;
		dst->array.offset = n;

		arr->last_write = src;

		array_insert(block, block->keeps, src);

		return;
	}

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	mov->barrier_class = IR3_BARRIER_ARRAY_W;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
	dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV));
	dst->instr = arr->last_write;
	dst->size = arr->length;
	dst->array.id = arr->id;
	dst->array.offset = n;
	ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

	if (address)
		ir3_instr_set_address(mov, address);

	arr->last_write = mov;

	/* the array store may only matter to something in an earlier
	 * block (ie. loops), but since arrays are not in SSA, depth
	 * pass won't know this.. so keep all array stores:
	 */
	array_insert(block, block->keeps, mov);
}