/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so)
{
	struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

	if (compiler->gpu_id >= 400) {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->astc_srgb = so->key.vastc_srgb;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->astc_srgb = so->key.fastc_srgb;
		}
	} else {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->samples = so->key.vsamples;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->samples = so->key.fsamples;
		}
	}

	if (compiler->gpu_id >= 400) {
		ctx->funcs = &ir3_a4xx_funcs;
	}

	ctx->compiler = compiler;
	ctx->so = so;
	ctx->def_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);
	ctx->block_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);

	/* TODO: maybe generate some sort of bitmask of what key
	 * lowers vs what the shader has (ie. no need to run texture
	 * clamp lowering if the shader has no texture sample
	 * instrs).. although this should be done further up the
	 * stack to avoid creating duplicate variants..
	 */

	if (ir3_key_lowers_nir(&so->key)) {
		nir_shader *s = nir_shader_clone(ctx, so->shader->nir);
		ctx->s = ir3_optimize_nir(so->shader, s, &so->key);
	} else {
		/* fast-path for shader key that lowers nothing in NIR: */
		ctx->s = nir_shader_clone(ctx, so->shader->nir);
	}

	/* these need to be the last passes run, so do them here instead
	 * of in ir3_optimize_nir():
	 */
	NIR_PASS_V(ctx->s, nir_lower_bool_to_int32);
	NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);
	NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

	if (ir3_shader_debug & IR3_DBG_DISASM) {
		DBG("dump nir%dv%d: type=%d, k={cts=%u,hp=%u}",
			so->shader->id, so->id, so->type,
			so->key.color_two_side, so->key.half_precision);
		nir_print_shader(ctx->s, stdout);
	}

	if (shader_debug_enabled(so->type)) {
		fprintf(stderr, "NIR (final form) for %s shader:\n",
			_mesa_shader_stage_to_string(so->type));
		nir_print_shader(ctx->s, stderr);
	}

	ir3_nir_scan_driver_consts(ctx->s, &so->const_layout);

	so->num_uniforms = ctx->s->num_uniforms;
	so->num_ubos = ctx->s->info.num_ubos;

	/* Layout of constant registers, each section aligned to vec4.  Note
	 * that pointer size (ubo, etc) changes depending on generation.
	 *
	 *    user consts
	 *    UBO addresses
	 *    SSBO sizes
	 *    if (vertex shader) {
	 *        driver params (IR3_DP_*)
	 *        if (stream_output.num_outputs > 0)
	 *           stream-out addresses
	 *    }
	 *    immediates
	 *
	 * Immediates go last mostly because they are inserted in the CP pass
	 * after the nir -> ir3 frontend.
	 */
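	/* All offsets below are counted in vec4 units; align(cnt, 4) / 4
	 * rounds a scalar (dword) count up to the number of whole vec4
	 * slots it occupies.
	 */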
	unsigned constoff = align(ctx->s->num_uniforms, 4);
	unsigned ptrsz = ir3_pointer_size(ctx);

	memset(&so->constbase, ~0, sizeof(so->constbase));

	if (so->num_ubos > 0) {
		so->constbase.ubo = constoff;
		constoff += align(ctx->s->info.num_ubos * ptrsz, 4) / 4;
	}

	if (so->const_layout.ssbo_size.count > 0) {
		unsigned cnt = so->const_layout.ssbo_size.count;
		so->constbase.ssbo_sizes = constoff;
		constoff += align(cnt, 4) / 4;
	}

	if (so->const_layout.image_dims.count > 0) {
		unsigned cnt = so->const_layout.image_dims.count;
		so->constbase.image_dims = constoff;
		constoff += align(cnt, 4) / 4;
	}

	unsigned num_driver_params = 0;
	if (so->type == MESA_SHADER_VERTEX) {
		num_driver_params = IR3_DP_VS_COUNT;
	} else if (so->type == MESA_SHADER_COMPUTE) {
		num_driver_params = IR3_DP_CS_COUNT;
	}

	so->constbase.driver_param = constoff;
	constoff += align(num_driver_params, 4) / 4;

	if ((so->type == MESA_SHADER_VERTEX) &&
			(compiler->gpu_id < 500) &&
			so->shader->stream_output.num_outputs > 0) {
		so->constbase.tfbo = constoff;
		constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
	}

	so->constbase.immediate = constoff;

	return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
	ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n-element value array (to be populated by the caller) and
 * insert it in def_ht:
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
	struct ir3_instruction **value =
		ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
	_mesa_hash_table_insert(ctx->def_ht, dst, value);
	return value;
}

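/* ir3_get_dst()/put_dst() bracket the translation of a single NIR
 * instruction: ir3_get_dst() hands out the value array for the
 * destination, the caller fills in one ir3 instruction per component,
 * and put_dst() applies the deferred fixups (half-precision flags,
 * array stores).  A minimal sketch of the expected call pattern, for a
 * hypothetical nir_alu_instr *alu with a single-component dest:
 *
 *    struct ir3_instruction **dst = ir3_get_dst(ctx, &alu->dest.dest, 1);
 *    dst[0] = ir3_ADD_U(ctx->block, src0, 0, src1, 0);
 *    put_dst(ctx, &alu->dest.dest);
 */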
struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
	struct ir3_instruction **value;

	if (dst->is_ssa) {
		value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
	} else {
		value = ralloc_array(ctx, struct ir3_instruction *, n);
	}

	/* NOTE: in the non-ssa case we don't really need to store last_dst,
	 * but it helps us catch cases where a put_dst() call is forgotten:
	 */
	compile_assert(ctx, !ctx->last_dst);
	ctx->last_dst = value;
	ctx->last_dst_n = n;

	return value;
}

struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
	if (src->is_ssa) {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
		compile_assert(ctx, entry);
		return entry->data;
	} else {
		nir_register *reg = src->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = arr->r->num_components;
		struct ir3_instruction *addr = NULL;
		struct ir3_instruction **value =
			ralloc_array(ctx, struct ir3_instruction *, num_components);

		if (src->reg.indirect)
			addr = ir3_get_addr(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = src->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			value[i] = ir3_create_array_load(ctx, arr, n, addr);
		}

		return value;
	}
}

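/* flush the value array handed out by ir3_get_dst(): propagate
 * IR3_REG_HALF for sub-32bit destinations, and for non-ssa dests emit
 * the (possibly indirect) array stores that were deferred until all
 * components were written:
 */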
void
put_dst(struct ir3_context *ctx, nir_dest *dst)
{
	unsigned bit_size = nir_dest_bit_size(*dst);

	if (bit_size < 32) {
		for (unsigned i = 0; i < ctx->last_dst_n; i++) {
			struct ir3_instruction *instr = ctx->last_dst[i];
			instr->regs[0]->flags |= IR3_REG_HALF;
			if (instr->opc == OPC_META_FO)
				instr->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
		}
	}

	if (!dst->is_ssa) {
		nir_register *reg = dst->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = ctx->last_dst_n;
		struct ir3_instruction *addr = NULL;

		if (dst->reg.indirect)
			addr = ir3_get_addr(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = dst->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			if (!ctx->last_dst[i])
				continue;
			ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
		}

		ralloc_free(ctx->last_dst);
	}
	ctx->last_dst = NULL;
	ctx->last_dst_n = 0;
}

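/* create a meta "collect" (fanin) instruction grouping arrsz scalar ssa
 * values, so that RA assigns them consecutive scalar registers (e.g. to
 * form the src vector of a texture sample instruction).  Returns NULL
 * for an empty group:
 */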
struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
		unsigned arrsz)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *collect;

	if (arrsz == 0)
		return NULL;

	unsigned flags = arr[0]->regs[0]->flags & IR3_REG_HALF;

	collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
	ir3_reg_create(collect, 0, flags); /* dst */
	for (unsigned i = 0; i < arrsz; i++) {
		struct ir3_instruction *elem = arr[i];

		/* Since arrays are pre-colored in RA, we can't assume that
		 * things will end up in the right place.  (Ie. if a collect
		 * joins elements from two different arrays.)  So insert an
		 * extra mov.
		 *
		 * We could possibly skip this if all the collected elements
		 * are contiguous elements in a single array.. not sure how
		 * likely that is to happen.
		 *
		 * Fixes a problem with glamor shaders, which in effect do
		 * something like:
		 *
		 *    if (foo)
		 *        texcoord = ..
		 *    else
		 *        texcoord = ..
		 *    color = texture2D(tex, texcoord);
		 *
		 * In this case, texcoord will end up as nir registers (which
		 * translate to ir3 arrays of length 1), and we can't assume
		 * the two (or more) arrays will get allocated in consecutive
		 * scalar registers.
		 */
		if (elem->regs[0]->flags & IR3_REG_ARRAY) {
			type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
			elem = ir3_MOV(block, elem, type);
		}

		compile_assert(ctx, (elem->regs[0]->flags & IR3_REG_HALF) == flags);
		ir3_reg_create(collect, 0, IR3_REG_SSA | flags)->instr = elem;
	}

	return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs, which need a split/fanout meta instruction inserted:
 */
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n)
{
	struct ir3_instruction *prev = NULL;

	if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
		dst[0] = src;
		return;
	}

	for (int i = 0, j = 0; i < n; i++) {
		struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
		ir3_reg_create(split, 0, IR3_REG_SSA);
		ir3_reg_create(split, 0, IR3_REG_SSA)->instr = src;
		split->fo.off = i + base;

		if (prev) {
			split->cp.left = prev;
			split->cp.left_cnt++;
			prev->cp.right = split;
			prev->cp.right_cnt++;
		}
		prev = split;

		if (src->regs[0]->wrmask & (1 << (i + base)))
			dst[j++] = split;
	}
}

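/* report a compile error, flagging the compile as failed (ctx->error).
 * If we know which instruction triggered the problem, print the shader
 * annotated with the error message at that instruction:
 */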
void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
	struct hash_table *errors = NULL;
	va_list ap;
	va_start(ap, format);
	if (ctx->cur_instr) {
		errors = _mesa_hash_table_create(NULL,
				_mesa_hash_pointer,
				_mesa_key_pointer_equal);
		char *msg = ralloc_vasprintf(errors, format, ap);
		_mesa_hash_table_insert(errors, ctx->cur_instr, msg);
	} else {
		_debug_vprintf(format, ap);
	}
	va_end(ap);
	nir_print_shader_annotated(ctx->s, stdout, errors);
	ralloc_free(errors);
	ctx->error = true;
	debug_assert(0);
}

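/* build the cov/shl/mov-to-a0.x sequence that loads a (32-bit) index
 * into the address register, pre-scaled by 'align' (the stride, in
 * components, of the thing being indexed):
 */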
static struct ir3_instruction *
create_addr(struct ir3_block *block, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *instr, *immed;

	/* TODO in at least some cases, the backend could probably be
	 * made clever enough to propagate IR3_REG_HALF..
	 */
	instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
	instr->regs[0]->flags |= IR3_REG_HALF;

	switch (align) {
	case 1:
		/* src *= 1: */
		break;
	case 2:
		/* src *= 2 => src <<= 1: */
		immed = create_immed(block, 1);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 3:
		/* src *= 3: */
		immed = create_immed(block, 3);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_MULL_U(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	case 4:
		/* src *= 4 => src <<= 2: */
		immed = create_immed(block, 2);
		immed->regs[0]->flags |= IR3_REG_HALF;

		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		instr->regs[0]->flags |= IR3_REG_HALF;
		instr->regs[1]->flags |= IR3_REG_HALF;
		break;
	default:
		unreachable("bad align");
		return NULL;
	}

	instr = ir3_MOV(block, instr, TYPE_S16);
	instr->regs[0]->num = regid(REG_A0, 0);
	instr->regs[0]->flags |= IR3_REG_HALF;
	instr->regs[1]->flags |= IR3_REG_HALF;

	return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR-level src as an address:
 */
struct ir3_instruction *
ir3_get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *addr;
	unsigned idx = align - 1;

	compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr_ht));

	if (!ctx->addr_ht[idx]) {
		ctx->addr_ht[idx] = _mesa_hash_table_create(ctx,
				_mesa_hash_pointer, _mesa_key_pointer_equal);
	} else {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->addr_ht[idx], src);
		if (entry)
			return entry->data;
	}

	addr = create_addr(ctx->block, src, align);
	_mesa_hash_table_insert(ctx->addr_ht[idx], src, addr);

	return addr;
}

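/* get a predicate suitable for branch/kill: compare src against zero
 * and pin the result to p0.x (after nir_lower_bool_to_int32, booleans
 * are 0 / ~0, so a != 0 compare recovers the condition):
 */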
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *cond;

	/* NOTE: only cmps.*.* can write p0.x: */
	cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
	cond->cat2.condition = IR3_COND_NE;

	/* condition always goes in predicate register: */
	cond->regs[0]->num = regid(REG_P0, 0);

	return cond;
}

/*
 * Array helpers
 */

void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
	struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
	arr->id = ++ctx->num_arrays;
	/* NOTE: sometimes we get non-array regs, for example for arrays of
	 * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
	 * treat a non-array as if it were an array of length 1.
	 *
	 * It would be nice if there were a nir pass to convert arrays of
	 * length 1 to ssa.
	 */
	arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
	compile_assert(ctx, arr->length > 0);
	arr->r = reg;
	list_addtail(&arr->node, &ctx->ir->array_list);
}

struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
	list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
		if (arr->r == reg)
			return arr;
	}
	ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
	return NULL;
}

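/* Because arrays are not in SSA form, the load/store helpers below keep
 * accesses to a given array ordered by linking each access to the
 * previous write (arr->last_write) and via the ARRAY_R/ARRAY_W barrier
 * classes, rather than relying on use->def chains:
 */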
/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *src;

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	mov->barrier_class = IR3_BARRIER_ARRAY_R;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
	ir3_reg_create(mov, 0, 0);
	src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV));
	src->instr = arr->last_write;
	src->size = arr->length;
	src->array.id = arr->id;
	src->array.offset = n;

	if (address)
		ir3_instr_set_address(mov, address);

	return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *dst;

	/* if this is not a relative store, don't create an extra mov, since
	 * that ends up being difficult for cp to remove:
	 */
	if (!address) {
		dst = src->regs[0];

		src->barrier_class |= IR3_BARRIER_ARRAY_W;
		src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

		dst->flags |= IR3_REG_ARRAY;
		dst->instr = arr->last_write;
		dst->size = arr->length;
		dst->array.id = arr->id;
		dst->array.offset = n;

		arr->last_write = src;

		array_insert(block, block->keeps, src);

		return;
	}

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	mov->barrier_class = IR3_BARRIER_ARRAY_W;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
	dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV));
	dst->instr = arr->last_write;
	dst->size = arr->length;
	dst->array.id = arr->id;
	dst->array.offset = n;
	ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

	if (address)
		ir3_instr_set_address(mov, address);

	arr->last_write = mov;

	/* the array store may only matter to something in an earlier
	 * block (e.g. loops), but since arrays are not in SSA, the depth
	 * pass won't know this.. so keep all array stores:
	 */
	array_insert(block, block->keeps, mov);
}