/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

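/* Per-shader-variant compile context.  ir3_context_init() clones the
 * variant's NIR, runs the remaining ir3-specific lowering passes that must
 * come after the generic optimization loop, and sets up the hash tables
 * used to map NIR defs/blocks to ir3 instructions while the shader is
 * being converted.
 */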
struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so)
{
	struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

	if (compiler->gpu_id >= 400) {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->astc_srgb = so->key.vastc_srgb;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->astc_srgb = so->key.fastc_srgb;
		}

	} else {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->samples = so->key.vsamples;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->samples = so->key.fsamples;
		}
	}

	if (compiler->gpu_id >= 600) {
		ctx->funcs = &ir3_a6xx_funcs;
	} else if (compiler->gpu_id >= 400) {
		ctx->funcs = &ir3_a4xx_funcs;
	}

	ctx->compiler = compiler;
	ctx->so = so;
	ctx->def_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);
	ctx->block_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);

	/* TODO: maybe generate some sort of bitmask of what the key lowers
	 * vs what the shader has (ie. no need to run texture clamp lowering
	 * if there are no texture sample instrs)..  although this should be
	 * done further up the stack to avoid creating duplicate variants..
	 */

	ctx->s = nir_shader_clone(ctx, so->shader->nir);
	if (ir3_key_lowers_nir(&so->key))
		ir3_optimize_nir(so->shader, ctx->s, &so->key);

	/* this needs to be the last pass run, so do this here instead of
	 * in ir3_optimize_nir():
	 */
	NIR_PASS_V(ctx->s, nir_lower_bool_to_int32);
	NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);

	/* We want to lower nir_op_imul as late as possible, to also catch
	 * those generated by earlier passes (e.g. nir_lower_locals_to_regs).
	 * However, we want a final swing of a few passes to have a chance
	 * at optimizing the result.
	 */
	bool progress = false;
	NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
	if (progress) {
		NIR_PASS_V(ctx->s, nir_opt_algebraic);
		NIR_PASS_V(ctx->s, nir_opt_copy_prop_vars);
		NIR_PASS_V(ctx->s, nir_opt_dead_write_vars);
		NIR_PASS_V(ctx->s, nir_opt_dce);
		NIR_PASS_V(ctx->s, nir_opt_constant_folding);
	}

	/* The texture pre-fetch feature exists a4xx onwards, but only enable
	 * it on generations where it has actually been tested:
	 */
	if ((so->type == MESA_SHADER_FRAGMENT) && (compiler->gpu_id >= 600))
		NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

	NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

	if (shader_debug_enabled(so->type)) {
		fprintf(stdout, "NIR (final form) for %s shader %s:\n",
			ir3_shader_stage(so), so->shader->nir->info.name);
		nir_print_shader(ctx->s, stdout);
	}

	ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

	return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
	ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n element value array (to be populated by caller) and
 * insert in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
	struct ir3_instruction **value =
		ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
	_mesa_hash_table_insert(ctx->def_ht, dst, value);
	return value;
}

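/* Like ir3_get_dst_ssa(), but also handles non-SSA (register) destinations.
 * Each call must be paired with a later ir3_put_dst() once the caller has
 * filled in the returned value array.
 */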
struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
	struct ir3_instruction **value;

	if (dst->is_ssa) {
		value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
	} else {
		value = ralloc_array(ctx, struct ir3_instruction *, n);
	}

	/* NOTE: in the non-ssa case we don't really need to store last_dst,
	 * but it helps us catch cases where a put_dst() call is forgotten:
	 */
	compile_assert(ctx, !ctx->last_dst);
	ctx->last_dst = value;
	ctx->last_dst_n = n;

	return value;
}

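/* Resolve a nir_src to the per-component ir3 values that produce it: a
 * def_ht lookup for SSA sources, or freshly created array loads for
 * register sources.
 */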
struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
	if (src->is_ssa) {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
		compile_assert(ctx, entry);
		return entry->data;
	} else {
		nir_register *reg = src->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = arr->r->num_components;
		struct ir3_instruction *addr = NULL;
		struct ir3_instruction **value =
			ralloc_array(ctx, struct ir3_instruction *, num_components);

		if (src->reg.indirect)
			addr = ir3_get_addr(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = src->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			value[i] = ir3_create_array_load(ctx, arr, n, addr, reg->bit_size);
		}

		return value;
	}
}

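/* Finish off a nir_dest: apply half/high register fixups to the values the
 * caller stored via ir3_get_dst(), and for register (non-SSA) destinations
 * emit the corresponding array stores.
 */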
void
ir3_put_dst(struct ir3_context *ctx, nir_dest *dst)
{
	unsigned bit_size = nir_dest_bit_size(*dst);

	/* add an extra mov if dst value is a HIGH reg.. not all instructions
	 * can read from HIGH regs, and in cases where they can, ir3_cp will
	 * clean up the extra mov:
	 */
	for (unsigned i = 0; i < ctx->last_dst_n; i++) {
		if (!ctx->last_dst[i])
			continue;
		if (ctx->last_dst[i]->regs[0]->flags & IR3_REG_HIGH) {
			ctx->last_dst[i] = ir3_MOV(ctx->block, ctx->last_dst[i], TYPE_U32);
		}
	}

	if (bit_size < 32) {
		for (unsigned i = 0; i < ctx->last_dst_n; i++) {
			struct ir3_instruction *dst = ctx->last_dst[i];
			dst->regs[0]->flags |= IR3_REG_HALF;
			if (ctx->last_dst[i]->opc == OPC_META_FO)
				dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
		}
	}

	if (!dst->is_ssa) {
		nir_register *reg = dst->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = ctx->last_dst_n;
		struct ir3_instruction *addr = NULL;

		if (dst->reg.indirect)
			addr = ir3_get_addr(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = dst->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			if (!ctx->last_dst[i])
				continue;
			ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
		}

		ralloc_free(ctx->last_dst);
	}

	ctx->last_dst = NULL;
	ctx->last_dst_n = 0;
}

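/* half/high precision flags of an instruction's dst register, used below to
 * propagate the same precision onto collect/split meta instructions:
 */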
static unsigned
dest_flags(struct ir3_instruction *instr)
{
	return instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH);
}

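/* Gather 'arrsz' scalar values into a single vector-like value by emitting
 * a collect (fanin) meta instruction whose sources are the elements of
 * 'arr':
 */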
struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
		unsigned arrsz)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *collect;

	if (arrsz == 0)
		return NULL;

	unsigned flags = dest_flags(arr[0]);

	collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
	ir3_reg_create(collect, 0, flags); /* dst */
	for (unsigned i = 0; i < arrsz; i++) {
		struct ir3_instruction *elem = arr[i];

		/* Since arrays are pre-colored in RA, we can't assume that
		 * things will end up in the right place.  (Ie. if a collect
		 * joins elements from two different arrays.)  So insert an
		 * extra mov.
		 *
		 * We could possibly skip this if all the collected elements
		 * are contiguous elements in a single array..  not sure how
		 * likely that is to happen.
		 *
		 * Fixes a problem with glamor shaders, that in effect do
		 * something like:
		 *
		 *   if (foo)
		 *     texcoord = ..
		 *   else
		 *     texcoord = ..
		 *   color = texture2D(tex, texcoord);
		 *
		 * In this case, texcoord will end up as nir registers (which
		 * translate to ir3 arrays of length 1), and we can't assume
		 * the two (or more) arrays will get allocated in consecutive
		 * scalar registers.
		 */
		if (elem->regs[0]->flags & IR3_REG_ARRAY) {
			type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
			elem = ir3_MOV(block, elem, type);
		}

		compile_assert(ctx, dest_flags(elem) == flags);
		ir3_reg_create(collect, 0, IR3_REG_SSA | flags)->instr = elem;
	}

	collect->regs[0]->wrmask = MASK(arrsz);

	return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split/fanout meta instruction inserted
 */
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n)
{
	struct ir3_instruction *prev = NULL;

	if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
		dst[0] = src;
		return;
	}

	unsigned flags = dest_flags(src);

	for (int i = 0, j = 0; i < n; i++) {
		struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
		ir3_reg_create(split, 0, IR3_REG_SSA | flags);
		ir3_reg_create(split, 0, IR3_REG_SSA | flags)->instr = src;
		split->fo.off = i + base;

		if (prev) {
			split->cp.left = prev;
			split->cp.left_cnt++;
			prev->cp.right = split;
			prev->cp.right_cnt++;
		}
		prev = split;

		if (src->regs[0]->wrmask & (1 << (i + base)))
			dst[j++] = split;
	}
}

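/* Report a fatal compile error.  When we know which instruction we were
 * converting, the message is attached to it and the NIR is printed with the
 * error annotated in place:
 */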
NORETURN void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
	struct hash_table *errors = NULL;
	va_list ap;
	va_start(ap, format);
	if (ctx->cur_instr) {
		errors = _mesa_hash_table_create(NULL,
				_mesa_hash_pointer,
				_mesa_key_pointer_equal);
		char *msg = ralloc_vasprintf(errors, format, ap);
		_mesa_hash_table_insert(errors, ctx->cur_instr, msg);
	} else {
		_debug_vprintf(format, ap);
	}
	va_end(ap);
	nir_print_shader_annotated(ctx->s, stdout, errors);
	ralloc_free(errors);
	ctx->error = true;
	unreachable("");
}

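/* Emit the sequence that scales 'src' by 'align' components and moves the
 * result into the a0.x address register, for relative (indirect) register
 * access:
 */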
static struct ir3_instruction *
create_addr(struct ir3_block *block, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *instr, *immed;

	/* TODO in at least some cases, the backend could probably be
	 * made clever enough to propagate IR3_REG_HALF..
	 */
	instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
	instr->regs[0]->flags |= IR3_REG_HALF;

	switch (align) {
	case 1:
		/* src *= 1: */
		break;
	case 2:
		/* src *= 2 => src <<= 1: */
		immed = create_immed(block, 1);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 3:
		/* src *= 3: */
		immed = create_immed(block, 3);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_MULL_U(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 4:
		/* src *= 4 => src <<= 2: */
		immed = create_immed(block, 2);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	default:
		unreachable("bad align");
		return NULL;
	}

	instr = ir3_MOV(block, instr, TYPE_S16);
	instr->regs[0]->num = regid(REG_A0, 0);
	instr->regs[0]->flags |= IR3_REG_HALF;
	instr->regs[1]->flags |= IR3_REG_HALF;

	return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR-level src as an address
 */
struct ir3_instruction *
ir3_get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *addr;
	unsigned idx = align - 1;

	compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr_ht));

	if (!ctx->addr_ht[idx]) {
		ctx->addr_ht[idx] = _mesa_hash_table_create(ctx,
				_mesa_hash_pointer, _mesa_key_pointer_equal);
	} else {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->addr_ht[idx], src);
		if (entry)
			return entry->data;
	}

	addr = create_addr(ctx->block, src, align);
	_mesa_hash_table_insert(ctx->addr_ht[idx], src, addr);

	return addr;
}

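/* Get a condition suitable for the p0.x predicate register (as consumed by
 * conditional branches and kills), by comparing 'src' against zero:
 */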
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *cond;

	/* NOTE: only cmps.*.* can write p0.x: */
	cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
	cond->cat2.condition = IR3_COND_NE;

	/* condition always goes in predicate register: */
	cond->regs[0]->num = regid(REG_P0, 0);

	return cond;
}

/*
 * Array helpers
 */

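/* Create the ir3_array bookkeeping for a nir_register, so that subsequent
 * array loads/stores can refer to it by id:
 */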
void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
	struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
	arr->id = ++ctx->num_arrays;
	/* NOTE: sometimes we get non-array regs, for example for arrays of
	 * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
	 * treat a non-array as if it were an array of length 1.
	 *
	 * It would be nice if there was a nir pass to convert arrays of
	 * length 1 to ssa.
	 */
	arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
	compile_assert(ctx, arr->length > 0);
	arr->r = reg;
	list_addtail(&arr->node, &ctx->ir->array_list);
}

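/* Look up the ir3_array previously created by ir3_declare_array() for a
 * given nir_register:
 */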
struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
	list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
		if (arr->r == reg)
			return arr;
	}
	ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
	return NULL;
}

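/* Array loads are emitted as a mov whose src is linked to the array's
 * last_write, so ordering against earlier stores is preserved.
 */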
/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *address, unsigned bitsize)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *src;
	unsigned flags = 0;

	mov = ir3_instr_create(block, OPC_MOV);
	if (bitsize < 32) {
		mov->cat1.src_type = TYPE_U16;
		mov->cat1.dst_type = TYPE_U16;
		flags |= IR3_REG_HALF;
	} else {
		mov->cat1.src_type = TYPE_U32;
		mov->cat1.dst_type = TYPE_U32;
	}

	mov->barrier_class = IR3_BARRIER_ARRAY_R;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
	ir3_reg_create(mov, 0, flags);
	src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV) | flags);
	src->instr = arr->last_write;
	src->size = arr->length;
	src->array.id = arr->id;
	src->array.offset = n;

	if (address)
		ir3_instr_set_address(mov, address);

	return mov;
}


/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *dst;

	/* if not a relative store, don't create an extra mov, since that
	 * ends up being difficult for cp to remove.
	 *
	 * Also, don't skip the mov if the src is meta (like fanout/split),
	 * since that creates a situation that RA can't really handle properly.
	 */
	if (!address && !is_meta(src)) {
		dst = src->regs[0];

		src->barrier_class |= IR3_BARRIER_ARRAY_W;
		src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

		dst->flags |= IR3_REG_ARRAY;
		dst->instr = arr->last_write;
		dst->size = arr->length;
		dst->array.id = arr->id;
		dst->array.offset = n;

		arr->last_write = src;

		array_insert(block, block->keeps, src);

		return;
	}

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	mov->barrier_class = IR3_BARRIER_ARRAY_W;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
	dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV));
	dst->instr = arr->last_write;
	dst->size = arr->length;
	dst->array.id = arr->id;
	dst->array.offset = n;
	ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

	if (address)
		ir3_instr_set_address(mov, address);

	arr->last_write = mov;

	/* the array store may only matter to something in an earlier
	 * block (ie. loops), but since arrays are not in SSA, the depth
	 * pass won't know this..  so keep all array stores:
	 */
	array_insert(block, block->keeps, mov);
}