freedreno/ir3: split out const_state setup
src/freedreno/ir3/ir3_context.c
/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

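/* Construct the per-variant compilation context: pick hw-generation
 * specific callbacks and shader-key state, clone (and, if the key
 * lowers anything, re-optimize) the NIR, and run the final NIR
 * lowering passes that must happen right before translation to ir3:
 */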
struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
      struct ir3_shader_variant *so)
{
   struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

   if (compiler->gpu_id >= 400) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->astc_srgb = so->key.vastc_srgb;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->astc_srgb = so->key.fastc_srgb;
      }
   } else {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->samples = so->key.vsamples;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->samples = so->key.fsamples;
      }
   }

   if (compiler->gpu_id >= 600) {
      ctx->funcs = &ir3_a6xx_funcs;
   } else if (compiler->gpu_id >= 400) {
      ctx->funcs = &ir3_a4xx_funcs;
   }

   ctx->compiler = compiler;
   ctx->so = so;
   ctx->def_ht = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->block_ht = _mesa_hash_table_create(ctx,
         _mesa_hash_pointer, _mesa_key_pointer_equal);

   /* TODO: maybe generate some sort of bitmask of what the key
    * lowers vs what the shader has (ie. no need for texture clamp
    * lowering if the shader has no texture sample instrs)..
    * although this should be done further up the stack to avoid
    * creating duplicate variants..
    */

   if (ir3_key_lowers_nir(&so->key)) {
      nir_shader *s = nir_shader_clone(ctx, so->shader->nir);
      ctx->s = ir3_optimize_nir(so->shader, s, &so->key);
   } else {
      /* fast-path for shader key that lowers nothing in NIR: */
      ctx->s = nir_shader_clone(ctx, so->shader->nir);
   }

   /* this needs to be the last pass run, so do this here instead of
    * in ir3_optimize_nir():
    */
   NIR_PASS_V(ctx->s, nir_lower_bool_to_int32);
   NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);
   NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      DBG("dump nir%dv%d: type=%d, k={cts=%u,hp=%u}",
         so->shader->id, so->id, so->type,
         so->key.color_two_side, so->key.half_precision);
      nir_print_shader(ctx->s, stdout);
   }

   if (shader_debug_enabled(so->type)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
         _mesa_shader_stage_to_string(so->type));
      nir_print_shader(ctx->s, stderr);
   }

   ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

   ir3_setup_const_state(so);

   return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
   ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n-element value array (to be populated by caller) and
 * insert in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
   struct ir3_instruction **value =
      ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
   _mesa_hash_table_insert(ctx->def_ht, dst, value);
   return value;
}

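/* get the value array for a nir_dest.. for the ssa case this inserts
 * into def_ht, for the non-ssa (register) case the values are written
 * back to the backing ir3_array by ir3_put_dst():
 */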
struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
   struct ir3_instruction **value;

   if (dst->is_ssa) {
      value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
   } else {
      value = ralloc_array(ctx, struct ir3_instruction *, n);
   }

   /* NOTE: in the non-ssa case, we don't really need to store last_dst,
    * but this helps us catch cases where a put_dst() call is forgotten
    */
   compile_assert(ctx, !ctx->last_dst);
   ctx->last_dst = value;
   ctx->last_dst_n = n;

   return value;
}

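/* resolve a nir_src to an array of ir3 instructions, one per
 * component.. ssa srcs are looked up in def_ht, reg srcs generate
 * (possibly indirect) loads from the backing ir3_array:
 */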
struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
   if (src->is_ssa) {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
      compile_assert(ctx, entry);
      return entry->data;
   } else {
      nir_register *reg = src->reg.reg;
      struct ir3_array *arr = ir3_get_array(ctx, reg);
      unsigned num_components = arr->r->num_components;
      struct ir3_instruction *addr = NULL;
      struct ir3_instruction **value =
         ralloc_array(ctx, struct ir3_instruction *, num_components);

      if (src->reg.indirect)
         addr = ir3_get_addr(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
               reg->num_components);

      for (unsigned i = 0; i < num_components; i++) {
         unsigned n = src->reg.base_offset * reg->num_components + i;
         compile_assert(ctx, n < arr->length);
         value[i] = ir3_create_array_load(ctx, arr, n, addr);
      }

      return value;
   }
}

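/* flush the values collected since ir3_get_dst().. fixes up HIGH and
 * half-precision dst regs, and for the non-ssa case stores each
 * component back to the backing ir3_array:
 */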
void
ir3_put_dst(struct ir3_context *ctx, nir_dest *dst)
{
   unsigned bit_size = nir_dest_bit_size(*dst);

   /* add an extra mov if the dst value is a HIGH reg.. not all
    * instructions can read from HIGH regs; in cases where they can,
    * ir3_cp will clean up the extra mov:
    */
   for (unsigned i = 0; i < ctx->last_dst_n; i++) {
      if (!ctx->last_dst[i])
         continue;
      if (ctx->last_dst[i]->regs[0]->flags & IR3_REG_HIGH) {
         ctx->last_dst[i] = ir3_MOV(ctx->block, ctx->last_dst[i], TYPE_U32);
      }
   }

   if (bit_size < 32) {
      for (unsigned i = 0; i < ctx->last_dst_n; i++) {
         struct ir3_instruction *dst = ctx->last_dst[i];
         dst->regs[0]->flags |= IR3_REG_HALF;
         if (ctx->last_dst[i]->opc == OPC_META_FO)
            dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
      }
   }

   if (!dst->is_ssa) {
      nir_register *reg = dst->reg.reg;
      struct ir3_array *arr = ir3_get_array(ctx, reg);
      unsigned num_components = ctx->last_dst_n;
      struct ir3_instruction *addr = NULL;

      if (dst->reg.indirect)
         addr = ir3_get_addr(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
               reg->num_components);

      for (unsigned i = 0; i < num_components; i++) {
         unsigned n = dst->reg.base_offset * reg->num_components + i;
         compile_assert(ctx, n < arr->length);
         if (!ctx->last_dst[i])
            continue;
         ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
      }

      ralloc_free(ctx->last_dst);
   }

   ctx->last_dst = NULL;
   ctx->last_dst_n = 0;
}

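/* gather 'arrsz' scalar values into a single virtual register
 * (OPC_META_FI), so that they end up allocated in consecutive
 * scalar registers:
 */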
struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
      unsigned arrsz)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *collect;

   if (arrsz == 0)
      return NULL;

   unsigned flags = arr[0]->regs[0]->flags & IR3_REG_HALF;

   collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
   ir3_reg_create(collect, 0, flags); /* dst */
   for (unsigned i = 0; i < arrsz; i++) {
      struct ir3_instruction *elem = arr[i];

      /* Since arrays are pre-colored in RA, we can't assume that
       * things will end up in the right place. (Ie. if a collect
       * joins elements from two different arrays.) So insert an
       * extra mov.
       *
       * We could possibly skip this if all the collected elements
       * are contiguous elements in a single array.. not sure how
       * likely that is to happen.
       *
       * Fixes a problem with glamor shaders, which in effect do
       * something like:
       *
       *    if (foo)
       *       texcoord = ..
       *    else
       *       texcoord = ..
       *    color = texture2D(tex, texcoord);
       *
       * In this case, texcoord ends up as nir registers (which
       * translate to ir3 arrays of length 1), and we can't assume
       * the two (or more) arrays will get allocated in consecutive
       * scalar registers.
       */
      if (elem->regs[0]->flags & IR3_REG_ARRAY) {
         type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
         elem = ir3_MOV(block, elem, type);
      }

      compile_assert(ctx, (elem->regs[0]->flags & IR3_REG_HALF) == flags);
      ir3_reg_create(collect, 0, IR3_REG_SSA | flags)->instr = elem;
   }

   collect->regs[0]->wrmask = MASK(arrsz);

   return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split/fanout meta instruction inserted
 */
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
      struct ir3_instruction *src, unsigned base, unsigned n)
{
   struct ir3_instruction *prev = NULL;

   if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
      dst[0] = src;
      return;
   }

   unsigned flags = src->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH);

   for (int i = 0, j = 0; i < n; i++) {
      struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
      ir3_reg_create(split, 0, IR3_REG_SSA | flags);
      ir3_reg_create(split, 0, IR3_REG_SSA | flags)->instr = src;
      split->fo.off = i + base;

      if (prev) {
         split->cp.left = prev;
         split->cp.left_cnt++;
         prev->cp.right = split;
         prev->cp.right_cnt++;
      }
      prev = split;

      if (src->regs[0]->wrmask & (1 << (i + base)))
         dst[j++] = split;
   }
}

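/* report a compile error and dump the NIR, annotating the instruction
 * currently being translated (if any) with the error message:
 */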
NORETURN void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
   struct hash_table *errors = NULL;
   va_list ap;
   va_start(ap, format);
   if (ctx->cur_instr) {
      errors = _mesa_hash_table_create(NULL,
            _mesa_hash_pointer,
            _mesa_key_pointer_equal);
      char *msg = ralloc_vasprintf(errors, format, ap);
      _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
   } else {
      _debug_vprintf(format, ap);
   }
   va_end(ap);
   nir_print_shader_annotated(ctx->s, stdout, errors);
   ralloc_free(errors);
   ctx->error = true;
   unreachable("");
}

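/* build the cov/shl (or mull)/mova sequence to load 'src', scaled by
 * 'align' components, into the address register (a0.x):
 */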
static struct ir3_instruction *
create_addr(struct ir3_block *block, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *instr, *immed;

   /* TODO in at least some cases, the backend could probably be
    * made clever enough to propagate IR3_REG_HALF..
    */
   instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
   instr->regs[0]->flags |= IR3_REG_HALF;

   switch (align) {
   case 1:
      /* src *= 1: */
      break;
   case 2:
      /* src *= 2 => src <<= 1: */
      immed = create_immed(block, 1);
      immed->regs[0]->flags |= IR3_REG_HALF;

      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      instr->regs[0]->flags |= IR3_REG_HALF;
      instr->regs[1]->flags |= IR3_REG_HALF;
      break;
   case 3:
      /* src *= 3: */
      immed = create_immed(block, 3);
      immed->regs[0]->flags |= IR3_REG_HALF;

      instr = ir3_MULL_U(block, instr, 0, immed, 0);
      instr->regs[0]->flags |= IR3_REG_HALF;
      instr->regs[1]->flags |= IR3_REG_HALF;
      break;
   case 4:
      /* src *= 4 => src <<= 2: */
      immed = create_immed(block, 2);
      immed->regs[0]->flags |= IR3_REG_HALF;

      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      instr->regs[0]->flags |= IR3_REG_HALF;
      instr->regs[1]->flags |= IR3_REG_HALF;
      break;
   default:
      unreachable("bad align");
      return NULL;
   }

   instr = ir3_MOV(block, instr, TYPE_S16);
   instr->regs[0]->num = regid(REG_A0, 0);
   instr->regs[0]->flags |= IR3_REG_HALF;
   instr->regs[1]->flags |= IR3_REG_HALF;

   return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR-level src as address
 */
struct ir3_instruction *
ir3_get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *addr;
   unsigned idx = align - 1;

   compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr_ht));

   if (!ctx->addr_ht[idx]) {
      ctx->addr_ht[idx] = _mesa_hash_table_create(ctx,
            _mesa_hash_pointer, _mesa_key_pointer_equal);
   } else {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->addr_ht[idx], src);
      if (entry)
         return entry->data;
   }

   addr = create_addr(ctx->block, src, align);
   _mesa_hash_table_insert(ctx->addr_ht[idx], src, addr);

   return addr;
}

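/* get a predicate (p0.x) version of 'src', by comparing it against
 * zero:
 */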
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *cond;

   /* NOTE: only cmps.*.* can write p0.x: */
   cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
   cond->cat2.condition = IR3_COND_NE;

   /* condition always goes in predicate register: */
   cond->regs[0]->num = regid(REG_P0, 0);

   return cond;
}

/*
 * Array helpers
 */

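/* create the ir3_array backing a nir_register and add it to the
 * shader's array list:
 */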
void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
   struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
   arr->id = ++ctx->num_arrays;
   /* NOTE: sometimes we get non-array regs, for example for arrays of
    * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
    * treat a non-array as if it were an array of length 1.
    *
    * It would be nice if there was a nir pass to convert arrays of
    * length 1 to ssa.
    */
   arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
   compile_assert(ctx, arr->length > 0);
   arr->r = reg;
   list_addtail(&arr->node, &ctx->ir->array_list);
}

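/* find the ir3_array previously declared for a given nir_register: */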
struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
   list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
      if (arr->r == reg)
         return arr;
   }
   ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
   return NULL;
}

/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
      struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *src;

   mov = ir3_instr_create(block, OPC_MOV);
   mov->cat1.src_type = TYPE_U32;
   mov->cat1.dst_type = TYPE_U32;
   mov->barrier_class = IR3_BARRIER_ARRAY_R;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
   ir3_reg_create(mov, 0, 0);
   src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
         COND(address, IR3_REG_RELATIV));
   src->instr = arr->last_write;
   src->size = arr->length;
   src->array.id = arr->id;
   src->array.offset = n;

   if (address)
      ir3_instr_set_address(mov, address);

   return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
      struct ir3_instruction *src, struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *dst;

   /* if not a relative store, don't create an extra mov, since that
    * ends up being difficult for cp to remove.
    */
   if (!address) {
      dst = src->regs[0];

      src->barrier_class |= IR3_BARRIER_ARRAY_W;
      src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

      dst->flags |= IR3_REG_ARRAY;
      dst->instr = arr->last_write;
      dst->size = arr->length;
      dst->array.id = arr->id;
      dst->array.offset = n;

      arr->last_write = src;

      array_insert(block, block->keeps, src);

      return;
   }

   mov = ir3_instr_create(block, OPC_MOV);
   mov->cat1.src_type = TYPE_U32;
   mov->cat1.dst_type = TYPE_U32;
   mov->barrier_class = IR3_BARRIER_ARRAY_W;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
   dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
         COND(address, IR3_REG_RELATIV));
   dst->instr = arr->last_write;
   dst->size = arr->length;
   dst->array.id = arr->id;
   dst->array.offset = n;
   ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

   if (address)
      ir3_instr_set_address(mov, address);

   arr->last_write = mov;

   /* the array store may only matter to something in an earlier
    * block (ie. loops), but since arrays are not in SSA, the depth
    * pass won't know this.. so keep all array stores:
    */
   array_insert(block, block->keeps, mov);
}