freedreno/ir3: fix fallout of extra assert
[mesa.git] / src / freedreno / ir3 / ir3_context.c
/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so)
{
	struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

	if (compiler->gpu_id >= 400) {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->astc_srgb = so->key.vastc_srgb;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->astc_srgb = so->key.fastc_srgb;
		}

	} else {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->samples = so->key.vsamples;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->samples = so->key.fsamples;
		}
	}

	ctx->compiler = compiler;
	ctx->so = so;
	ctx->def_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);
	ctx->block_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);

	/* TODO: maybe generate some sort of bitmask of what the key
	 * lowers vs what the shader has (ie. no need to run texture
	 * clamp lowering if there are no texture sample instrs)..
	 * although this should be done further up the stack to avoid
	 * creating duplicate variants..
	 */

	if (ir3_key_lowers_nir(&so->key)) {
		nir_shader *s = nir_shader_clone(ctx, so->shader->nir);
		ctx->s = ir3_optimize_nir(so->shader, s, &so->key);
	} else {
		/* fast-path for shader key that lowers nothing in NIR: */
		ctx->s = nir_shader_clone(ctx, so->shader->nir);
	}

	/* these need to be the last passes run, so do them here instead of
	 * in ir3_optimize_nir():
	 */
	NIR_PASS_V(ctx->s, nir_lower_bool_to_int32);
	NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);
	NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

	if (ir3_shader_debug & IR3_DBG_DISASM) {
		DBG("dump nir%dv%d: type=%d, k={cts=%u,hp=%u}",
			so->shader->id, so->id, so->type,
			so->key.color_two_side, so->key.half_precision);
		nir_print_shader(ctx->s, stdout);
	}

	if (shader_debug_enabled(so->type)) {
		fprintf(stderr, "NIR (final form) for %s shader:\n",
			_mesa_shader_stage_to_string(so->type));
		nir_print_shader(ctx->s, stderr);
	}

	ir3_nir_scan_driver_consts(ctx->s, &so->const_layout);

	so->num_uniforms = ctx->s->num_uniforms;
	so->num_ubos = ctx->s->info.num_ubos;

	/* Layout of constant registers, each section aligned to vec4. Note
	 * that pointer size (ubo, etc) changes depending on generation.
	 *
	 *    user consts
	 *    UBO addresses
	 *    SSBO sizes
	 *    if (vertex shader) {
	 *        driver params (IR3_DP_*)
	 *        if (stream_output.num_outputs > 0)
	 *            stream-out addresses
	 *    }
	 *    immediates
	 *
	 * Immediates go last mostly because they are inserted in the CP pass
	 * after the nir -> ir3 frontend.
	 */
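	/* Illustrative example (hypothetical numbers, not taken from a real
	 * shader), following the arithmetic below: with num_uniforms == 10,
	 * constoff starts at align(10, 4) == 12; with 2 UBOs and ptrsz == 2,
	 * the UBO-address section occupies align(2 * 2, 4) / 4 == 1 slot at
	 * offset 12, so a following section (say 3 SSBO sizes, at
	 * align(3, 4) / 4 == 1 slot) would start at offset 13, and so on,
	 * each section advancing constoff by its own align(..., 4) / 4.
	 */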
	unsigned constoff = align(ctx->s->num_uniforms, 4);
	unsigned ptrsz = ir3_pointer_size(ctx);

	memset(&so->constbase, ~0, sizeof(so->constbase));

	if (so->num_ubos > 0) {
		so->constbase.ubo = constoff;
		constoff += align(ctx->s->info.num_ubos * ptrsz, 4) / 4;
	}

	if (so->const_layout.ssbo_size.count > 0) {
		unsigned cnt = so->const_layout.ssbo_size.count;
		so->constbase.ssbo_sizes = constoff;
		constoff += align(cnt, 4) / 4;
	}

	if (so->const_layout.image_dims.count > 0) {
		unsigned cnt = so->const_layout.image_dims.count;
		so->constbase.image_dims = constoff;
		constoff += align(cnt, 4) / 4;
	}

	unsigned num_driver_params = 0;
	if (so->type == MESA_SHADER_VERTEX) {
		num_driver_params = IR3_DP_VS_COUNT;
	} else if (so->type == MESA_SHADER_COMPUTE) {
		num_driver_params = IR3_DP_CS_COUNT;
	}

	so->constbase.driver_param = constoff;
	constoff += align(num_driver_params, 4) / 4;

	if ((so->type == MESA_SHADER_VERTEX) &&
			(compiler->gpu_id < 500) &&
			so->shader->stream_output.num_outputs > 0) {
		so->constbase.tfbo = constoff;
		constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
	}

	so->constbase.immediate = constoff;

	return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
	ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n-element value array (to be populated by the caller) and
 * insert it in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
	struct ir3_instruction **value =
		ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
	_mesa_hash_table_insert(ctx->def_ht, dst, value);
	return value;
}

struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
	struct ir3_instruction **value;

	if (dst->is_ssa) {
		value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
	} else {
		value = ralloc_array(ctx, struct ir3_instruction *, n);
	}

	/* NOTE: in the non-ssa case we don't really need to store last_dst,
	 * but doing so helps us catch cases where a put_dst() call was
	 * forgotten:
	 */
	compile_assert(ctx, !ctx->last_dst);
	ctx->last_dst = value;
	ctx->last_dst_n = n;

	return value;
}

struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
	if (src->is_ssa) {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
		compile_assert(ctx, entry);
		return entry->data;
	} else {
		nir_register *reg = src->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = arr->r->num_components;
		struct ir3_instruction *addr = NULL;
		struct ir3_instruction **value =
			ralloc_array(ctx, struct ir3_instruction *, num_components);

		if (src->reg.indirect)
			addr = ir3_get_addr(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = src->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			value[i] = ir3_create_array_load(ctx, arr, n, addr);
		}

		return value;
	}
}

void
put_dst(struct ir3_context *ctx, nir_dest *dst)
{
	unsigned bit_size = nir_dest_bit_size(*dst);

	if (bit_size < 32) {
		for (unsigned i = 0; i < ctx->last_dst_n; i++) {
			struct ir3_instruction *d = ctx->last_dst[i];
			/* entries can be NULL for components that were never
			 * written (see the store loop below), so skip them:
			 */
			if (!d)
				continue;
			d->regs[0]->flags |= IR3_REG_HALF;
			if (d->opc == OPC_META_FO)
				d->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
		}
	}

	if (!dst->is_ssa) {
		nir_register *reg = dst->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = ctx->last_dst_n;
		struct ir3_instruction *addr = NULL;

		if (dst->reg.indirect)
			addr = ir3_get_addr(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = dst->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			if (!ctx->last_dst[i])
				continue;
			ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
		}

		ralloc_free(ctx->last_dst);
	}
	ctx->last_dst = NULL;
	ctx->last_dst_n = 0;
}

struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
		unsigned arrsz)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *collect;

	if (arrsz == 0)
		return NULL;

	unsigned flags = arr[0]->regs[0]->flags & IR3_REG_HALF;

	collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
	ir3_reg_create(collect, 0, flags); /* dst */
	for (unsigned i = 0; i < arrsz; i++) {
		struct ir3_instruction *elem = arr[i];

		/* Since arrays are pre-colored in RA, we can't assume that
		 * things will end up in the right place.  (Ie. if a collect
		 * joins elements from two different arrays.)  So insert an
		 * extra mov.
		 *
		 * We could possibly skip this if all the collected elements
		 * are contiguous elements in a single array.. not sure how
		 * likely that is to happen.
		 *
		 * Fixes a problem with glamor shaders, which in effect do
		 * something like:
		 *
		 *   if (foo)
		 *     texcoord = ..
		 *   else
		 *     texcoord = ..
		 *   color = texture2D(tex, texcoord);
		 *
		 * In this case, texcoord will end up as nir registers (which
		 * translate to ir3 arrays of length 1), and we can't assume
		 * the two (or more) arrays will get allocated in consecutive
		 * scalar registers.
		 */
		if (elem->regs[0]->flags & IR3_REG_ARRAY) {
			type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
			elem = ir3_MOV(block, elem, type);
		}

		compile_assert(ctx, (elem->regs[0]->flags & IR3_REG_HALF) == flags);
		ir3_reg_create(collect, 0, IR3_REG_SSA | flags)->instr = elem;
	}

	return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split/fanout meta instruction inserted
 */
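/* For example (a hypothetical but typical use): splitting a texture
 * sample result with base == 0 and n == 4 creates four OPC_META_FO
 * instructions with fo.off 0..3; only components whose bit is set in
 * the src wrmask are handed back in dst[], the others are created but
 * not returned.
 */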
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n)
{
	struct ir3_instruction *prev = NULL;

	if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
		dst[0] = src;
		return;
	}

	for (int i = 0, j = 0; i < n; i++) {
		struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
		ir3_reg_create(split, 0, IR3_REG_SSA);
		ir3_reg_create(split, 0, IR3_REG_SSA)->instr = src;
		split->fo.off = i + base;

		if (prev) {
			split->cp.left = prev;
			split->cp.left_cnt++;
			prev->cp.right = split;
			prev->cp.right_cnt++;
		}
		prev = split;

		if (src->regs[0]->wrmask & (1 << (i + base)))
			dst[j++] = split;
	}
}

void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
	struct hash_table *errors = NULL;
	va_list ap;
	va_start(ap, format);
	if (ctx->cur_instr) {
		errors = _mesa_hash_table_create(NULL,
				_mesa_hash_pointer,
				_mesa_key_pointer_equal);
		char *msg = ralloc_vasprintf(errors, format, ap);
		_mesa_hash_table_insert(errors, ctx->cur_instr, msg);
	} else {
		_debug_vprintf(format, ap);
	}
	va_end(ap);
	nir_print_shader_annotated(ctx->s, stdout, errors);
	ralloc_free(errors);
	ctx->error = true;
	debug_assert(0);
}

static struct ir3_instruction *
create_addr(struct ir3_block *block, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *instr, *immed;

	/* TODO: in at least some cases, the backend could probably be
	 * made clever enough to propagate IR3_REG_HALF..
	 */
	instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
	instr->regs[0]->flags |= IR3_REG_HALF;

	switch (align) {
	case 1:
		/* src *= 1: */
		break;
	case 2:
		/* src *= 2 => src <<= 1: */
		immed = create_immed(block, 1);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 3:
		/* src *= 3: */
		immed = create_immed(block, 3);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_MULL_U(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 4:
		/* src *= 4 => src <<= 2: */
		immed = create_immed(block, 2);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	default:
		unreachable("bad align");
		return NULL;
	}

	instr = ir3_MOV(block, instr, TYPE_S16);
	instr->regs[0]->num = regid(REG_A0, 0);
	instr->regs[0]->flags |= IR3_REG_HALF;
	instr->regs[1]->flags |= IR3_REG_HALF;

	return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR-level src as address
 */
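/* Sketch of the caching behaviour (hypothetical usage): requesting an
 * address for the same src twice with align == 4 builds the
 * cov/shl/mova sequence once and returns the cached a0.x mov from
 * addr_ht[3] on the second lookup; the same src with align == 1 lives
 * in addr_ht[0] and gets its own sequence.
 */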
struct ir3_instruction *
ir3_get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *addr;
	unsigned idx = align - 1;

	compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr_ht));

	if (!ctx->addr_ht[idx]) {
		ctx->addr_ht[idx] = _mesa_hash_table_create(ctx,
				_mesa_hash_pointer, _mesa_key_pointer_equal);
	} else {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->addr_ht[idx], src);
		if (entry)
			return entry->data;
	}

	addr = create_addr(ctx->block, src, align);
	_mesa_hash_table_insert(ctx->addr_ht[idx], src, addr);

	return addr;
}

struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *cond;

	/* NOTE: only cmps.*.* can write p0.x: */
	cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
	cond->cat2.condition = IR3_COND_NE;

	/* condition always goes in predicate register: */
	cond->regs[0]->num = regid(REG_P0, 0);

	return cond;
}

/*
 * Array helpers
 */

void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
	struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
	arr->id = ++ctx->num_arrays;
	/* NOTE: sometimes we get non-array regs, for example for arrays of
	 * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
	 * treat a non-array as if it were an array of length 1.
	 *
	 * It would be nice if there were a nir pass to convert arrays of
	 * length 1 to ssa.
	 */
	arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
	compile_assert(ctx, arr->length > 0);
	arr->r = reg;
	list_addtail(&arr->node, &ctx->ir->array_list);
}

struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
	list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
		if (arr->r == reg)
			return arr;
	}
	ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
	return NULL;
}

/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *src;

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	mov->barrier_class = IR3_BARRIER_ARRAY_R;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
	ir3_reg_create(mov, 0, 0);
	src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV));
	src->instr = arr->last_write;
	src->size = arr->length;
	src->array.id = arr->id;
	src->array.offset = n;

	if (address)
		ir3_instr_set_address(mov, address);

	return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *dst;

	/* if not a relative store, don't create an extra mov, since that
	 * ends up being difficult for cp to remove.
	 */
	if (!address) {
		dst = src->regs[0];

		src->barrier_class |= IR3_BARRIER_ARRAY_W;
		src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

		dst->flags |= IR3_REG_ARRAY;
		dst->instr = arr->last_write;
		dst->size = arr->length;
		dst->array.id = arr->id;
		dst->array.offset = n;

		arr->last_write = src;

		array_insert(block, block->keeps, src);

		return;
	}

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	mov->barrier_class = IR3_BARRIER_ARRAY_W;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
	dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV));
	dst->instr = arr->last_write;
	dst->size = arr->length;
	dst->array.id = arr->id;
	dst->array.offset = n;
	ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

	if (address)
		ir3_instr_set_address(mov, address);

	arr->last_write = mov;

	/* the array store may only matter to something in an earlier
	 * block (ie. loops), but since arrays are not in SSA, the depth
	 * pass won't know this.. so keep all array stores:
	 */
	array_insert(block, block->keeps, mov);
}