freedreno/ir3: compute shader support
[mesa.git] src/gallium/drivers/freedreno/ir3/ir3_compiler_nir.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdarg.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"

#include "freedreno_util.h"

#include "ir3_compiler.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

#include "instr-a3xx.h"
#include "ir3.h"


struct ir3_compile {
    struct ir3_compiler *compiler;

    struct nir_shader *s;

    struct ir3 *ir;
    struct ir3_shader_variant *so;

    struct ir3_block *block;      /* the current block */
    struct ir3_block *in_block;   /* block created for shader inputs */

    nir_function_impl *impl;

    /* For fragment shaders, from the hw perspective the only
     * actual input is the r0.xy position register passed to bary.f.
     * But TGSI doesn't know that, it still declares things as
     * IN[] registers.  So we do all the input tracking normally
     * and fix things up after compile_instructions()
     *
     * NOTE that frag_pos is the hardware position (possibly it
     * is actually an index or tag or some such.. it is *not*
     * values that can be directly used for gl_FragCoord..)
     */
    struct ir3_instruction *frag_pos, *frag_face, *frag_coord[4];

    /* For vertex shaders, keep track of the system value sources */
    struct ir3_instruction *vertex_id, *basevertex, *instance_id;

    /* Compute shader inputs: */
    struct ir3_instruction *local_invocation_id, *work_group_id;

    /* For SSBOs and atomics, we need to preserve order, such
     * that reads don't overtake writes, and the order of writes
     * is preserved.  Atomics are considered writes.
     *
     * To do this, we track the last write and last access, in a
     * similar way to ir3_array.  But since we don't know whether
     * the same SSBO is bound to multiple slots, we simply track
     * this globally rather than per-SSBO.
     *
     * TODO should we track this per block instead?  I guess it
     * shouldn't matter much?
     */
    struct ir3_instruction *last_write, *last_access;

    /* mapping from nir_register to defining instruction: */
    struct hash_table *def_ht;

    unsigned num_arrays;

    /* A common pattern for indirect addressing is to request the
     * same address register multiple times.  To avoid generating
     * duplicate instruction sequences (which our backend does not
     * try to clean up, since that should be done at the NIR stage)
     * we cache the address value generated for a given src value:
     */
    struct hash_table *addr_ht;

    /* maps nir_block to ir3_block, mostly for the purposes of
     * figuring out the block's successors
     */
    struct hash_table *block_ht;

    /* a4xx (at least patchlevel 0) cannot seem to flat-interpolate
     * so we need to use ldlv.u32 to load the varying directly:
     */
    bool flat_bypass;

    /* on a3xx, we need to add one to # of array levels:
     */
    bool levels_add_one;

    /* on a3xx, we need to scale up integer coords for isaml based
     * on LoD:
     */
    bool unminify_coords;

    /* on a4xx, for array textures we need to add 0.5 to the array
     * index coordinate:
     */
    bool array_index_add_half;

    /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
    unsigned astc_srgb;

    unsigned max_texture_index;

    /* set if we encounter something we can't handle yet, so we
     * can bail cleanly and fall back to the TGSI compiler f/e
     */
    bool error;
};

/* gpu pointer size in units of 32bit registers/slots */
static unsigned pointer_size(struct ir3_compile *ctx)
{
    return (ctx->compiler->gpu_id >= 500) ? 2 : 1;
}

static struct ir3_instruction * create_immed(struct ir3_block *block, uint32_t val);
static struct ir3_block * get_block(struct ir3_compile *ctx, nir_block *nblock);


static struct ir3_compile *
compile_init(struct ir3_compiler *compiler,
        struct ir3_shader_variant *so)
{
    struct ir3_compile *ctx = rzalloc(NULL, struct ir3_compile);

    if (compiler->gpu_id >= 400) {
        /* need special handling for "flat" */
        ctx->flat_bypass = true;
        ctx->levels_add_one = false;
        ctx->unminify_coords = false;
        ctx->array_index_add_half = true;

        if (so->type == SHADER_VERTEX)
            ctx->astc_srgb = so->key.vastc_srgb;
        else if (so->type == SHADER_FRAGMENT)
            ctx->astc_srgb = so->key.fastc_srgb;

    } else {
        /* no special handling for "flat" */
        ctx->flat_bypass = false;
        ctx->levels_add_one = true;
        ctx->unminify_coords = true;
        ctx->array_index_add_half = false;
    }

    ctx->compiler = compiler;
    ctx->ir = so->ir;
    ctx->so = so;
    ctx->def_ht = _mesa_hash_table_create(ctx,
            _mesa_hash_pointer, _mesa_key_pointer_equal);
    ctx->block_ht = _mesa_hash_table_create(ctx,
            _mesa_hash_pointer, _mesa_key_pointer_equal);

    /* TODO: maybe generate some sort of bitmask of what key
     * lowers vs what shader has (ie. no need to run texture
     * clamp lowering if the shader has no texture sample
     * instrs).. although this should be done further up the
     * stack to avoid creating duplicate variants..
     */

    if (ir3_key_lowers_nir(&so->key)) {
        nir_shader *s = nir_shader_clone(ctx, so->shader->nir);
        ctx->s = ir3_optimize_nir(so->shader, s, &so->key);
    } else {
        /* fast-path for shader key that lowers nothing in NIR: */
        ctx->s = so->shader->nir;
    }

    if (fd_mesa_debug & FD_DBG_DISASM) {
        DBG("dump nir%dv%d: type=%d, k={bp=%u,cts=%u,hp=%u}",
            so->shader->id, so->id, so->type,
            so->key.binning_pass, so->key.color_two_side,
            so->key.half_precision);
        nir_print_shader(ctx->s, stdout);
    }

    so->num_uniforms = ctx->s->num_uniforms;
    so->num_ubos = ctx->s->info->num_ubos;

    /* Layout of constant registers, each section aligned to vec4.  Note
     * that pointer size (ubo, etc) changes depending on generation.
     *
     *    user consts
     *    UBO addresses
     *    if (vertex shader) {
     *        driver params (IR3_DP_*)
     *        if (stream_output.num_outputs > 0)
     *           stream-out addresses
     *    }
     *    immediates
     *
     * Immediates go last mostly because they are inserted in the CP pass
     * after the nir -> ir3 frontend.
     */
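    /* As a hypothetical worked example of the layout math below: on a
     * gpu_id >= 500 part (ptrsz == 2) with 3 UBOs, the UBO address table
     * would occupy align(3 * 2, 4) / 4 == 2 vec4 const slots, starting
     * at constbase.ubo.
     */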
    unsigned constoff = align(ctx->s->num_uniforms, 4);
    unsigned ptrsz = pointer_size(ctx);

    memset(&so->constbase, ~0, sizeof(so->constbase));

    if (so->num_ubos > 0) {
        so->constbase.ubo = constoff;
        constoff += align(ctx->s->info->num_ubos * ptrsz, 4) / 4;
    }

    unsigned num_driver_params = 0;
    if (so->type == SHADER_VERTEX) {
        num_driver_params = IR3_DP_VS_COUNT;
    } else if (so->type == SHADER_COMPUTE) {
        num_driver_params = IR3_DP_CS_COUNT;
    }

    so->constbase.driver_param = constoff;
    constoff += align(num_driver_params, 4) / 4;

    if ((so->type == SHADER_VERTEX) &&
            (compiler->gpu_id < 500) &&
            so->shader->stream_output.num_outputs > 0) {
        so->constbase.tfbo = constoff;
        constoff += align(PIPE_MAX_SO_BUFFERS * ptrsz, 4) / 4;
    }

    so->constbase.immediate = constoff;

    return ctx;
}

static void
compile_error(struct ir3_compile *ctx, const char *format, ...)
{
    va_list ap;
    va_start(ap, format);
    _debug_vprintf(format, ap);
    va_end(ap);
    nir_print_shader(ctx->s, stdout);
    ctx->error = true;
    debug_assert(0);
}

#define compile_assert(ctx, cond) do { \
        if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
    } while (0)

static void
compile_free(struct ir3_compile *ctx)
{
    ralloc_free(ctx);
}

static void
declare_var(struct ir3_compile *ctx, nir_variable *var)
{
    unsigned length = glsl_get_length(var->type) * 4;  /* always vec4, at least with ttn */
    struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
    arr->id = ++ctx->num_arrays;
    arr->length = length;
    arr->var = var;
    list_addtail(&arr->node, &ctx->ir->array_list);
}

static struct ir3_array *
get_var(struct ir3_compile *ctx, nir_variable *var)
{
    list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
        if (arr->var == var)
            return arr;
    }
    compile_error(ctx, "bogus var: %s\n", var->name);
    return NULL;
}

/* allocate an n-element value array (to be populated by caller) and
 * insert it in def_ht
 */
static struct ir3_instruction **
__get_dst(struct ir3_compile *ctx, void *key, unsigned n)
{
    struct ir3_instruction **value =
        ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
    _mesa_hash_table_insert(ctx->def_ht, key, value);
    return value;
}

static struct ir3_instruction **
get_dst(struct ir3_compile *ctx, nir_dest *dst, unsigned n)
{
    compile_assert(ctx, dst->is_ssa);
    if (dst->is_ssa) {
        return __get_dst(ctx, &dst->ssa, n);
    } else {
        return __get_dst(ctx, dst->reg.reg, n);
    }
}

static struct ir3_instruction **
get_dst_ssa(struct ir3_compile *ctx, nir_ssa_def *dst, unsigned n)
{
    return __get_dst(ctx, dst, n);
}

static struct ir3_instruction * const *
get_src(struct ir3_compile *ctx, nir_src *src)
{
    struct hash_entry *entry;
    compile_assert(ctx, src->is_ssa);
    if (src->is_ssa) {
        entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
    } else {
        entry = _mesa_hash_table_search(ctx->def_ht, src->reg.reg);
    }
    compile_assert(ctx, entry);
    return entry->data;
}

static struct ir3_instruction *
create_immed(struct ir3_block *block, uint32_t val)
{
    struct ir3_instruction *mov;

    mov = ir3_instr_create(block, OPC_MOV);
    mov->cat1.src_type = TYPE_U32;
    mov->cat1.dst_type = TYPE_U32;
    ir3_reg_create(mov, 0, 0);
    ir3_reg_create(mov, 0, IR3_REG_IMMED)->uim_val = val;

    return mov;
}

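/* Build an address-register write for relative addressing.  A rough
 * sketch of the sequence emitted below (the exact encoding is whatever
 * the ir3 builders produce):
 *
 *    cov.u32s16 tmp, src      ; a0 is a half-width register
 *    shl.b      tmp, tmp, 2   ; scale the index by 4
 *    mov.s16s16 a0.x, tmp
 */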
static struct ir3_instruction *
create_addr(struct ir3_block *block, struct ir3_instruction *src)
{
    struct ir3_instruction *instr, *immed;

    /* TODO in at least some cases, the backend could probably be
     * made clever enough to propagate IR3_REG_HALF..
     */
    instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
    instr->regs[0]->flags |= IR3_REG_HALF;

    immed = create_immed(block, 2);
    immed->regs[0]->flags |= IR3_REG_HALF;

    instr = ir3_SHL_B(block, instr, 0, immed, 0);
    instr->regs[0]->flags |= IR3_REG_HALF;
    instr->regs[1]->flags |= IR3_REG_HALF;

    instr = ir3_MOV(block, instr, TYPE_S16);
    instr->regs[0]->num = regid(REG_A0, 0);
    instr->regs[0]->flags |= IR3_REG_HALF;
    instr->regs[1]->flags |= IR3_REG_HALF;

    return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR level src as address
 */
static struct ir3_instruction *
get_addr(struct ir3_compile *ctx, struct ir3_instruction *src)
{
    struct ir3_instruction *addr;

    if (!ctx->addr_ht) {
        ctx->addr_ht = _mesa_hash_table_create(ctx,
                _mesa_hash_pointer, _mesa_key_pointer_equal);
    } else {
        struct hash_entry *entry;
        entry = _mesa_hash_table_search(ctx->addr_ht, src);
        if (entry)
            return entry->data;
    }

    addr = create_addr(ctx->block, src);
    _mesa_hash_table_insert(ctx->addr_ht, src, addr);

    return addr;
}

static struct ir3_instruction *
get_predicate(struct ir3_compile *ctx, struct ir3_instruction *src)
{
    struct ir3_block *b = ctx->block;
    struct ir3_instruction *cond;

    /* NOTE: only cmps.*.* can write p0.x: */
    cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
    cond->cat2.condition = IR3_COND_NE;

    /* condition always goes in predicate register: */
    cond->regs[0]->num = regid(REG_P0, 0);

    return cond;
}

static struct ir3_instruction *
create_uniform(struct ir3_compile *ctx, unsigned n)
{
    struct ir3_instruction *mov;

    mov = ir3_instr_create(ctx->block, OPC_MOV);
    /* TODO get types right? */
    mov->cat1.src_type = TYPE_F32;
    mov->cat1.dst_type = TYPE_F32;
    ir3_reg_create(mov, 0, 0);
    ir3_reg_create(mov, n, IR3_REG_CONST);

    return mov;
}

static struct ir3_instruction *
create_uniform_indirect(struct ir3_compile *ctx, int n,
        struct ir3_instruction *address)
{
    struct ir3_instruction *mov;

    mov = ir3_instr_create(ctx->block, OPC_MOV);
    mov->cat1.src_type = TYPE_U32;
    mov->cat1.dst_type = TYPE_U32;
    ir3_reg_create(mov, 0, 0);
    ir3_reg_create(mov, 0, IR3_REG_CONST | IR3_REG_RELATIV)->array.offset = n;

    ir3_instr_set_address(mov, address);

    return mov;
}

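/* Gather 'arrsz' scalar ssa values into one contiguous vector via a
 * fan-in (OPC_META_FI) meta instruction, for instructions that take a
 * single multi-component src.
 */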
static struct ir3_instruction *
create_collect(struct ir3_block *block, struct ir3_instruction *const *arr,
        unsigned arrsz)
{
    struct ir3_instruction *collect;

    if (arrsz == 0)
        return NULL;

    collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
    ir3_reg_create(collect, 0, 0);     /* dst */
    for (unsigned i = 0; i < arrsz; i++)
        ir3_reg_create(collect, 0, IR3_REG_SSA)->instr = arr[i];

    return collect;
}

static struct ir3_instruction *
create_indirect_load(struct ir3_compile *ctx, unsigned arrsz, int n,
        struct ir3_instruction *address, struct ir3_instruction *collect)
{
    struct ir3_block *block = ctx->block;
    struct ir3_instruction *mov;
    struct ir3_register *src;

    mov = ir3_instr_create(block, OPC_MOV);
    mov->cat1.src_type = TYPE_U32;
    mov->cat1.dst_type = TYPE_U32;
    ir3_reg_create(mov, 0, 0);
    src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV);
    src->instr = collect;
    src->size = arrsz;
    src->array.offset = n;

    ir3_instr_set_address(mov, address);

    return mov;
}

/* relative (indirect) if address!=NULL */
static struct ir3_instruction *
create_var_load(struct ir3_compile *ctx, struct ir3_array *arr, int n,
        struct ir3_instruction *address)
{
    struct ir3_block *block = ctx->block;
    struct ir3_instruction *mov;
    struct ir3_register *src;

    mov = ir3_instr_create(block, OPC_MOV);
    mov->cat1.src_type = TYPE_U32;
    mov->cat1.dst_type = TYPE_U32;
    ir3_reg_create(mov, 0, 0);
    src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
            COND(address, IR3_REG_RELATIV));
    src->instr = arr->last_write;
    src->size = arr->length;
    src->array.id = arr->id;
    src->array.offset = n;

    if (address)
        ir3_instr_set_address(mov, address);

    arr->last_access = mov;

    return mov;
}

/* relative (indirect) if address!=NULL */
static struct ir3_instruction *
create_var_store(struct ir3_compile *ctx, struct ir3_array *arr, int n,
        struct ir3_instruction *src, struct ir3_instruction *address)
{
    struct ir3_block *block = ctx->block;
    struct ir3_instruction *mov;
    struct ir3_register *dst;

    mov = ir3_instr_create(block, OPC_MOV);
    mov->cat1.src_type = TYPE_U32;
    mov->cat1.dst_type = TYPE_U32;
    dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
            COND(address, IR3_REG_RELATIV));
    dst->instr = arr->last_access;
    dst->size = arr->length;
    dst->array.id = arr->id;
    dst->array.offset = n;
    ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;

    ir3_instr_set_address(mov, address);

    arr->last_write = arr->last_access = mov;

    return mov;
}

static struct ir3_instruction *
create_input_compmask(struct ir3_block *block, unsigned n, unsigned compmask)
{
    struct ir3_instruction *in;

    in = ir3_instr_create(block, OPC_META_INPUT);
    in->inout.block = block;
    ir3_reg_create(in, n, 0);

    in->regs[0]->wrmask = compmask;

    return in;
}

static struct ir3_instruction *
create_input(struct ir3_block *block, unsigned n)
{
    return create_input_compmask(block, n, 0x1);
}

static struct ir3_instruction *
create_frag_input(struct ir3_compile *ctx, bool use_ldlv)
{
    struct ir3_block *block = ctx->block;
    struct ir3_instruction *instr;
    /* actual inloc is assigned and fixed up later: */
    struct ir3_instruction *inloc = create_immed(block, 0);

    if (use_ldlv) {
        instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
        instr->cat6.type = TYPE_U32;
        instr->cat6.iim_val = 1;
    } else {
        instr = ir3_BARY_F(block, inloc, 0, ctx->frag_pos, 0);
        instr->regs[2]->wrmask = 0x3;
    }

    return instr;
}

static struct ir3_instruction *
create_frag_coord(struct ir3_compile *ctx, unsigned comp)
{
    struct ir3_block *block = ctx->block;
    struct ir3_instruction *instr;

    compile_assert(ctx, !ctx->frag_coord[comp]);

    ctx->frag_coord[comp] = create_input(ctx->block, 0);

    switch (comp) {
    case 0: /* .x */
    case 1: /* .y */
        /* for frag_coord, we get unsigned values.. we need
         * to subtract (integer) 8 and divide by 16 (right-
         * shift by 4) then convert to float:
         *
         *    sub.s tmp, src, 8
         *    shr.b tmp, tmp, 4
         *    mov.u32f32 dst, tmp
         *
         */
        instr = ir3_SUB_S(block, ctx->frag_coord[comp], 0,
                create_immed(block, 8), 0);
        instr = ir3_SHR_B(block, instr, 0,
                create_immed(block, 4), 0);
        instr = ir3_COV(block, instr, TYPE_U32, TYPE_F32);

        return instr;
    case 2: /* .z */
    case 3: /* .w */
    default:
        /* seems that we can use these as-is: */
        return ctx->frag_coord[comp];
    }
}

static struct ir3_instruction *
create_driver_param(struct ir3_compile *ctx, enum ir3_driver_param dp)
{
    /* first four vec4 sysval's reserved for UBOs: */
    /* NOTE: dp is in scalar, but there can be >4 dp components: */
    unsigned n = ctx->so->constbase.driver_param;
    unsigned r = regid(n + dp / 4, dp % 4);
    return create_uniform(ctx, r);
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split/fanout meta instruction inserted
 */
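/* (The split instructions below are also chained together as cp
 * left/right neighbors, presumably so that later passes can keep the
 * scalar components adjacent when assigning registers.)
 */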
static void
split_dest(struct ir3_block *block, struct ir3_instruction **dst,
        struct ir3_instruction *src, unsigned base, unsigned n)
{
    struct ir3_instruction *prev = NULL;
    for (int i = 0, j = 0; i < n; i++) {
        struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
        ir3_reg_create(split, 0, IR3_REG_SSA);
        ir3_reg_create(split, 0, IR3_REG_SSA)->instr = src;
        split->fo.off = i + base;

        if (prev) {
            split->cp.left = prev;
            split->cp.left_cnt++;
            prev->cp.right = split;
            prev->cp.right_cnt++;
        }
        prev = split;

        if (src->regs[0]->wrmask & (1 << (i + base)))
            dst[j++] = split;
    }
}

/*
 * Adreno uses uint rather than having a dedicated bool type,
 * which (potentially) requires some conversion, in particular
 * when using the output of a bool instr as an int input, or
 * vice versa.
 *
 *         | Adreno |  NIR  |
 *  -------+--------+-------+-
 *   true  |    1   |  ~0   |
 *   false |    0   |   0   |
 *
 * To convert from an adreno bool (uint) to nir, use:
 *
 *    absneg.s dst, (neg)src
 *
 * To convert back in the other direction:
 *
 *    absneg.s dst, (abs)src
 *
 * The CP step can clean up the absneg.s that cancel each other
 * out, and with a slight bit of extra cleverness (to recognize
 * the instructions which produce either a 0 or 1) can eliminate
 * the absneg.s's completely when an instruction that wants
 * 0/1 consumes the result.  For example, when a nir 'bcsel'
 * consumes the result of 'feq'.  So we should be able to get by
 * without a boolean resolve step, and without incurring any
 * extra penalty in instruction count.
 */

/* NIR bool -> native (adreno): */
static struct ir3_instruction *
ir3_b2n(struct ir3_block *block, struct ir3_instruction *instr)
{
    return ir3_ABSNEG_S(block, instr, IR3_REG_SABS);
}

/* native (adreno) -> NIR bool: */
static struct ir3_instruction *
ir3_n2b(struct ir3_block *block, struct ir3_instruction *instr)
{
    return ir3_ABSNEG_S(block, instr, IR3_REG_SNEG);
}

/*
 * alu/sfu instructions:
 */

static void
emit_alu(struct ir3_compile *ctx, nir_alu_instr *alu)
{
    const nir_op_info *info = &nir_op_infos[alu->op];
    struct ir3_instruction **dst, *src[info->num_inputs];
    struct ir3_block *b = ctx->block;

    dst = get_dst(ctx, &alu->dest.dest, MAX2(info->output_size, 1));

    /* Vectors are special in that they have non-scalarized writemasks,
     * and just take the first swizzle channel for each argument in
     * order into each writemask channel.
     */
    if ((alu->op == nir_op_vec2) ||
            (alu->op == nir_op_vec3) ||
            (alu->op == nir_op_vec4)) {

        for (int i = 0; i < info->num_inputs; i++) {
            nir_alu_src *asrc = &alu->src[i];

            compile_assert(ctx, !asrc->abs);
            compile_assert(ctx, !asrc->negate);

            src[i] = get_src(ctx, &asrc->src)[asrc->swizzle[0]];
            if (!src[i])
                src[i] = create_immed(ctx->block, 0);
            dst[i] = ir3_MOV(b, src[i], TYPE_U32);
        }

        return;
    }

    /* General case: We can just grab the one used channel per src. */
    for (int i = 0; i < info->num_inputs; i++) {
        unsigned chan = ffs(alu->dest.write_mask) - 1;
        nir_alu_src *asrc = &alu->src[i];

        compile_assert(ctx, !asrc->abs);
        compile_assert(ctx, !asrc->negate);

        src[i] = get_src(ctx, &asrc->src)[asrc->swizzle[chan]];

        compile_assert(ctx, src[i]);
    }

    switch (alu->op) {
    case nir_op_f2i32:
        dst[0] = ir3_COV(b, src[0], TYPE_F32, TYPE_S32);
        break;
    case nir_op_f2u32:
        dst[0] = ir3_COV(b, src[0], TYPE_F32, TYPE_U32);
        break;
    case nir_op_i2f32:
        dst[0] = ir3_COV(b, src[0], TYPE_S32, TYPE_F32);
        break;
    case nir_op_u2f32:
        dst[0] = ir3_COV(b, src[0], TYPE_U32, TYPE_F32);
        break;
    case nir_op_imov:
        dst[0] = ir3_MOV(b, src[0], TYPE_S32);
        break;
    case nir_op_fmov:
        dst[0] = ir3_MOV(b, src[0], TYPE_F32);
        break;
    case nir_op_f2b:
        dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
        dst[0]->cat2.condition = IR3_COND_NE;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_b2f:
        dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
        break;
    case nir_op_b2i:
        dst[0] = ir3_b2n(b, src[0]);
        break;
    case nir_op_i2b:
        dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
        dst[0]->cat2.condition = IR3_COND_NE;
        dst[0] = ir3_n2b(b, dst[0]);
        break;

    case nir_op_fneg:
        dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FNEG);
        break;
    case nir_op_fabs:
        dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FABS);
        break;
    case nir_op_fmax:
        dst[0] = ir3_MAX_F(b, src[0], 0, src[1], 0);
        break;
    case nir_op_fmin:
        dst[0] = ir3_MIN_F(b, src[0], 0, src[1], 0);
        break;
    case nir_op_fmul:
        dst[0] = ir3_MUL_F(b, src[0], 0, src[1], 0);
        break;
    case nir_op_fadd:
        dst[0] = ir3_ADD_F(b, src[0], 0, src[1], 0);
        break;
    case nir_op_fsub:
        dst[0] = ir3_ADD_F(b, src[0], 0, src[1], IR3_REG_FNEG);
        break;
    case nir_op_ffma:
        dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0);
        break;
    case nir_op_fddx:
        dst[0] = ir3_DSX(b, src[0], 0);
        dst[0]->cat5.type = TYPE_F32;
        break;
    case nir_op_fddy:
        dst[0] = ir3_DSY(b, src[0], 0);
        dst[0]->cat5.type = TYPE_F32;
        break;
    case nir_op_flt:
        dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_LT;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_fge:
        dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_GE;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_feq:
        dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_EQ;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_fne:
        dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_NE;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_fceil:
        dst[0] = ir3_CEIL_F(b, src[0], 0);
        break;
    case nir_op_ffloor:
        dst[0] = ir3_FLOOR_F(b, src[0], 0);
        break;
    case nir_op_ftrunc:
        dst[0] = ir3_TRUNC_F(b, src[0], 0);
        break;
    case nir_op_fround_even:
        dst[0] = ir3_RNDNE_F(b, src[0], 0);
        break;
    case nir_op_fsign:
        dst[0] = ir3_SIGN_F(b, src[0], 0);
        break;

    case nir_op_fsin:
        dst[0] = ir3_SIN(b, src[0], 0);
        break;
    case nir_op_fcos:
        dst[0] = ir3_COS(b, src[0], 0);
        break;
    case nir_op_frsq:
        dst[0] = ir3_RSQ(b, src[0], 0);
        break;
    case nir_op_frcp:
        dst[0] = ir3_RCP(b, src[0], 0);
        break;
    case nir_op_flog2:
        dst[0] = ir3_LOG2(b, src[0], 0);
        break;
    case nir_op_fexp2:
        dst[0] = ir3_EXP2(b, src[0], 0);
        break;
    case nir_op_fsqrt:
        dst[0] = ir3_SQRT(b, src[0], 0);
        break;

    case nir_op_iabs:
        dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SABS);
        break;
    case nir_op_iadd:
        dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
        break;
    case nir_op_iand:
        dst[0] = ir3_AND_B(b, src[0], 0, src[1], 0);
        break;
    case nir_op_imax:
        dst[0] = ir3_MAX_S(b, src[0], 0, src[1], 0);
        break;
    case nir_op_umax:
        dst[0] = ir3_MAX_U(b, src[0], 0, src[1], 0);
        break;
    case nir_op_imin:
        dst[0] = ir3_MIN_S(b, src[0], 0, src[1], 0);
        break;
    case nir_op_umin:
        dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
        break;
    case nir_op_imul:
        /*
         * dst = (al * bl) + (ah * bl << 16) + (al * bh << 16)
         *  mull.u tmp0, a, b           ; mul low, i.e. al * bl
         *  madsh.m16 tmp1, a, b, tmp0  ; mul-add shift high mix, i.e. ah * bl << 16
         *  madsh.m16 dst, b, a, tmp1   ; i.e. al * bh << 16
         */
        dst[0] = ir3_MADSH_M16(b, src[1], 0, src[0], 0,
                    ir3_MADSH_M16(b, src[0], 0, src[1], 0,
                        ir3_MULL_U(b, src[0], 0, src[1], 0), 0), 0);
        break;
    case nir_op_ineg:
        dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
        break;
    case nir_op_inot:
        dst[0] = ir3_NOT_B(b, src[0], 0);
        break;
    case nir_op_ior:
        dst[0] = ir3_OR_B(b, src[0], 0, src[1], 0);
        break;
    case nir_op_ishl:
        dst[0] = ir3_SHL_B(b, src[0], 0, src[1], 0);
        break;
    case nir_op_ishr:
        dst[0] = ir3_ASHR_B(b, src[0], 0, src[1], 0);
        break;
    case nir_op_isign: {
        /* maybe this would be sane to lower in nir.. */
        struct ir3_instruction *neg, *pos;

        neg = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
        neg->cat2.condition = IR3_COND_LT;

        pos = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
        pos->cat2.condition = IR3_COND_GT;

        dst[0] = ir3_SUB_U(b, pos, 0, neg, 0);

        break;
    }
    case nir_op_isub:
        dst[0] = ir3_SUB_U(b, src[0], 0, src[1], 0);
        break;
    case nir_op_ixor:
        dst[0] = ir3_XOR_B(b, src[0], 0, src[1], 0);
        break;
    case nir_op_ushr:
        dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0);
        break;
    case nir_op_ilt:
        dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_LT;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_ige:
        dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_GE;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_ieq:
        dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_EQ;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_ine:
        dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_NE;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_ult:
        dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_LT;
        dst[0] = ir3_n2b(b, dst[0]);
        break;
    case nir_op_uge:
        dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
        dst[0]->cat2.condition = IR3_COND_GE;
        dst[0] = ir3_n2b(b, dst[0]);
        break;

    case nir_op_bcsel:
        dst[0] = ir3_SEL_B32(b, src[1], 0, ir3_b2n(b, src[0]), 0, src[2], 0);
        break;

    case nir_op_bit_count:
        dst[0] = ir3_CBITS_B(b, src[0], 0);
        break;
    case nir_op_ifind_msb: {
        struct ir3_instruction *cmp;
        dst[0] = ir3_CLZ_S(b, src[0], 0);
        cmp = ir3_CMPS_S(b, dst[0], 0, create_immed(b, 0), 0);
        cmp->cat2.condition = IR3_COND_GE;
        dst[0] = ir3_SEL_B32(b,
                ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
                cmp, 0, dst[0], 0);
        break;
    }
    case nir_op_ufind_msb:
        dst[0] = ir3_CLZ_B(b, src[0], 0);
        dst[0] = ir3_SEL_B32(b,
                ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
                src[0], 0, dst[0], 0);
        break;
    case nir_op_find_lsb:
        dst[0] = ir3_BFREV_B(b, src[0], 0);
        dst[0] = ir3_CLZ_B(b, dst[0], 0);
        break;
    case nir_op_bitfield_reverse:
        dst[0] = ir3_BFREV_B(b, src[0], 0);
        break;

    default:
        compile_error(ctx, "Unhandled ALU op: %s\n",
                nir_op_infos[alu->op].name);
        break;
    }
}

/* handles direct/indirect UBO reads: */
static void
emit_intrinsic_load_ubo(struct ir3_compile *ctx, nir_intrinsic_instr *intr,
        struct ir3_instruction **dst)
{
    struct ir3_block *b = ctx->block;
    struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
    nir_const_value *const_offset;
    /* UBO addresses are the first driver params: */
    unsigned ubo = regid(ctx->so->constbase.ubo, 0);
    const unsigned ptrsz = pointer_size(ctx);

    int off = 0;

    /* First src is ubo index, which could either be an immed or not: */
    src0 = get_src(ctx, &intr->src[0])[0];
    if (is_same_type_mov(src0) &&
            (src0->regs[1]->flags & IR3_REG_IMMED)) {
        base_lo = create_uniform(ctx, ubo + (src0->regs[1]->iim_val * ptrsz));
        base_hi = create_uniform(ctx, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
    } else {
        base_lo = create_uniform_indirect(ctx, ubo, get_addr(ctx, src0));
        base_hi = create_uniform_indirect(ctx, ubo + 1, get_addr(ctx, src0));
    }

    /* note: on 32bit GPUs base_hi is ignored and DCE'd */
    addr = base_lo;

    const_offset = nir_src_as_const_value(intr->src[1]);
    if (const_offset) {
        off += const_offset->u32[0];
    } else {
        /* For load_ubo_indirect, second src is indirect offset: */
        src1 = get_src(ctx, &intr->src[1])[0];

        /* and add offset to addr: */
        addr = ir3_ADD_S(b, addr, 0, src1, 0);
    }

    /* if offset is too large to encode in the ldg, split it out: */
    if ((off + (intr->num_components * 4)) > 1024) {
        /* split out the minimal amount to improve the odds that
         * cp can fit the immediate in the add.s instruction:
         */
        unsigned off2 = off + (intr->num_components * 4) - 1024;
        addr = ir3_ADD_S(b, addr, 0, create_immed(b, off2), 0);
        off -= off2;
    }

    if (ptrsz == 2) {
        struct ir3_instruction *carry;

        /* handle 32b rollover, ie:
         *   if (addr < base_lo)
         *      base_hi++
         */
        carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
        carry->cat2.condition = IR3_COND_LT;
        base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);

        addr = create_collect(b, (struct ir3_instruction*[]){ addr, base_hi }, 2);
    }

    for (int i = 0; i < intr->num_components; i++) {
        struct ir3_instruction *load =
                ir3_LDG(b, addr, 0, create_immed(b, 1), 0);
        load->cat6.type = TYPE_U32;
        load->cat6.src_offset = off + i * 4;    /* byte offset */
        dst[i] = load;
    }
}

/* handles array reads: */
static void
emit_intrinsic_load_var(struct ir3_compile *ctx, nir_intrinsic_instr *intr,
        struct ir3_instruction **dst)
{
    nir_deref_var *dvar = intr->variables[0];
    nir_deref_array *darr = nir_deref_as_array(dvar->deref.child);
    struct ir3_array *arr = get_var(ctx, dvar->var);

    compile_assert(ctx, dvar->deref.child &&
            (dvar->deref.child->deref_type == nir_deref_type_array));

    switch (darr->deref_array_type) {
    case nir_deref_array_type_direct:
        /* direct access does not require anything special: */
        for (int i = 0; i < intr->num_components; i++) {
            unsigned n = darr->base_offset * 4 + i;
            compile_assert(ctx, n < arr->length);
            dst[i] = create_var_load(ctx, arr, n, NULL);
        }
        break;
    case nir_deref_array_type_indirect: {
        /* for indirect, we need to collect all the array elements: */
        struct ir3_instruction *addr =
                get_addr(ctx, get_src(ctx, &darr->indirect)[0]);
        for (int i = 0; i < intr->num_components; i++) {
            unsigned n = darr->base_offset * 4 + i;
            compile_assert(ctx, n < arr->length);
            dst[i] = create_var_load(ctx, arr, n, addr);
        }
        break;
    }
    default:
        compile_error(ctx, "Unhandled load deref type: %u\n",
                darr->deref_array_type);
        break;
    }
}

/* handles array writes: */
static void
emit_intrinsic_store_var(struct ir3_compile *ctx, nir_intrinsic_instr *intr)
{
    nir_deref_var *dvar = intr->variables[0];
    nir_deref_array *darr = nir_deref_as_array(dvar->deref.child);
    struct ir3_array *arr = get_var(ctx, dvar->var);
    struct ir3_instruction *addr;
    struct ir3_instruction * const *src;
    unsigned wrmask = nir_intrinsic_write_mask(intr);

    compile_assert(ctx, dvar->deref.child &&
            (dvar->deref.child->deref_type == nir_deref_type_array));

    src = get_src(ctx, &intr->src[0]);

    switch (darr->deref_array_type) {
    case nir_deref_array_type_direct:
        addr = NULL;
        break;
    case nir_deref_array_type_indirect:
        addr = get_addr(ctx, get_src(ctx, &darr->indirect)[0]);
        break;
    default:
        compile_error(ctx, "Unhandled store deref type: %u\n",
                darr->deref_array_type);
        return;
    }

    for (int i = 0; i < intr->num_components; i++) {
        if (!(wrmask & (1 << i)))
            continue;
        unsigned n = darr->base_offset * 4 + i;
        compile_assert(ctx, n < arr->length);
        create_var_store(ctx, arr, n, src[i], addr);
    }
}

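/* Keep SSBO accesses ordered by threading a false dependency through
 * regs[0]: a read depends on the last write, and a write depends on the
 * last access (read or write), matching the last_write/last_access
 * tracking described in struct ir3_compile above.
 */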
static void
mark_ssbo_read(struct ir3_compile *ctx, struct ir3_instruction *instr)
{
    instr->regs[0]->instr = ctx->last_write;
    instr->regs[0]->flags |= IR3_REG_SSA;
    ctx->last_access = instr;
}

static void
mark_ssbo_write(struct ir3_compile *ctx, struct ir3_instruction *instr)
{
    instr->regs[0]->instr = ctx->last_access;
    instr->regs[0]->flags |= IR3_REG_SSA;
    ctx->last_write = ctx->last_access = instr;
}

static void
emit_intrinsic_load_ssbo(struct ir3_compile *ctx, nir_intrinsic_instr *intr,
        struct ir3_instruction **dst)
{
    struct ir3_block *b = ctx->block;
    struct ir3_instruction *ldgb, *src0, *src1, *offset;
    nir_const_value *const_offset;

    /* can this be non-const buffer_index?  how do we handle that? */
    const_offset = nir_src_as_const_value(intr->src[0]);
    compile_assert(ctx, const_offset);

    offset = get_src(ctx, &intr->src[1])[0];

    /* src0 is uvec2(offset*4, 0), src1 is offset.. nir already *= 4: */
    src0 = create_collect(b, (struct ir3_instruction*[]){
        offset,
        create_immed(b, 0),
    }, 2);
    src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);

    ldgb = ir3_LDGB(b, create_immed(b, const_offset->u32[0]), 0,
            src0, 0, src1, 0);
    ldgb->regs[0]->wrmask = (1 << intr->num_components) - 1;
    ldgb->cat6.iim_val = intr->num_components;
    ldgb->cat6.type = TYPE_U32;
    mark_ssbo_read(ctx, ldgb);

    split_dest(b, dst, ldgb, 0, intr->num_components);
}

/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
static void
emit_intrinsic_store_ssbo(struct ir3_compile *ctx, nir_intrinsic_instr *intr)
{
    struct ir3_block *b = ctx->block;
    struct ir3_instruction *stgb, *src0, *src1, *src2, *offset;
    nir_const_value *const_offset;
    unsigned ncomp = ffs(~intr->const_index[0]) - 1;

    /* can this be non-const buffer_index?  how do we handle that? */
    const_offset = nir_src_as_const_value(intr->src[1]);
    compile_assert(ctx, const_offset);

    offset = get_src(ctx, &intr->src[2])[0];

    /* src0 is value, src1 is offset, src2 is uvec2(offset*4, 0)..
     * nir already *= 4:
     */
    src0 = create_collect(b, get_src(ctx, &intr->src[0]), ncomp);
    src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
    src2 = create_collect(b, (struct ir3_instruction*[]){
        offset,
        create_immed(b, 0),
    }, 2);

    stgb = ir3_STGB(b, create_immed(b, const_offset->u32[0]), 0,
            src0, 0, src1, 0, src2, 0);
    stgb->cat6.iim_val = ncomp;
    stgb->cat6.type = TYPE_U32;
    mark_ssbo_write(ctx, stgb);

    array_insert(b, b->keeps, stgb);
}

static void
emit_intrinsic_atomic(struct ir3_compile *ctx, nir_intrinsic_instr *intr)
{
    struct ir3_block *b = ctx->block;
    struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *offset;
    nir_const_value *const_offset;
    type_t type = TYPE_U32;

    /* can this be non-const buffer_index?  how do we handle that? */
    const_offset = nir_src_as_const_value(intr->src[0]);
    compile_assert(ctx, const_offset);
    ssbo = create_immed(b, const_offset->u32[0]);

    offset = get_src(ctx, &intr->src[1])[0];

    /* src0 is data (or uvec2(data, compare))
     * src1 is offset
     * src2 is uvec2(offset*4, 0)
     *
     * Note that nir already multiplies the offset by four
     */
    src0 = get_src(ctx, &intr->src[2])[0];
    src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
    src2 = create_collect(b, (struct ir3_instruction*[]){
        offset,
        create_immed(b, 0),
    }, 2);

    switch (intr->intrinsic) {
    case nir_intrinsic_ssbo_atomic_add:
        atomic = ir3_ATOMIC_ADD(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        break;
    case nir_intrinsic_ssbo_atomic_imin:
        atomic = ir3_ATOMIC_MIN(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        type = TYPE_S32;
        break;
    case nir_intrinsic_ssbo_atomic_umin:
        atomic = ir3_ATOMIC_MIN(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        break;
    case nir_intrinsic_ssbo_atomic_imax:
        atomic = ir3_ATOMIC_MAX(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        type = TYPE_S32;
        break;
    case nir_intrinsic_ssbo_atomic_umax:
        atomic = ir3_ATOMIC_MAX(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        break;
    case nir_intrinsic_ssbo_atomic_and:
        atomic = ir3_ATOMIC_AND(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        break;
    case nir_intrinsic_ssbo_atomic_or:
        atomic = ir3_ATOMIC_OR(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        break;
    case nir_intrinsic_ssbo_atomic_xor:
        atomic = ir3_ATOMIC_XOR(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        break;
    case nir_intrinsic_ssbo_atomic_exchange:
        atomic = ir3_ATOMIC_XCHG(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        break;
    case nir_intrinsic_ssbo_atomic_comp_swap:
        /* for cmpxchg, src0 is [ui]vec2(data, compare): */
        src0 = create_collect(b, (struct ir3_instruction*[]){
            src0,
            get_src(ctx, &intr->src[3])[0],
        }, 2);
        atomic = ir3_ATOMIC_CMPXCHG(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
        break;
    default:
        unreachable("boo");
    }

    atomic->cat6.iim_val = 1;
    atomic->cat6.type = type;
    mark_ssbo_write(ctx, atomic);

    /* even if nothing consumes the result, we can't DCE the instruction: */
    array_insert(b, b->keeps, atomic);
}

static void add_sysval_input_compmask(struct ir3_compile *ctx,
        gl_system_value slot, unsigned compmask,
        struct ir3_instruction *instr)
{
    struct ir3_shader_variant *so = ctx->so;
    unsigned r = regid(so->inputs_count, 0);
    unsigned n = so->inputs_count++;

    so->inputs[n].sysval = true;
    so->inputs[n].slot = slot;
    so->inputs[n].compmask = compmask;
    so->inputs[n].regid = r;
    so->inputs[n].interpolate = INTERP_MODE_FLAT;
    so->total_in++;

    ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1);
    ctx->ir->inputs[r] = instr;
}

static void add_sysval_input(struct ir3_compile *ctx, gl_system_value slot,
        struct ir3_instruction *instr)
{
    add_sysval_input_compmask(ctx, slot, 0x1, instr);
}

static void
emit_intrinsic(struct ir3_compile *ctx, nir_intrinsic_instr *intr)
{
    const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
    struct ir3_instruction **dst;
    struct ir3_instruction * const *src;
    struct ir3_block *b = ctx->block;
    nir_const_value *const_offset;
    int idx;

    if (info->has_dest) {
        dst = get_dst(ctx, &intr->dest, intr->num_components);
    } else {
        dst = NULL;
    }

    switch (intr->intrinsic) {
    case nir_intrinsic_load_uniform:
        idx = nir_intrinsic_base(intr);
        const_offset = nir_src_as_const_value(intr->src[0]);
        if (const_offset) {
            idx += const_offset->u32[0];
            for (int i = 0; i < intr->num_components; i++) {
                unsigned n = idx * 4 + i;
                dst[i] = create_uniform(ctx, n);
            }
        } else {
            src = get_src(ctx, &intr->src[0]);
            for (int i = 0; i < intr->num_components; i++) {
                int n = idx * 4 + i;
                dst[i] = create_uniform_indirect(ctx, n,
                        get_addr(ctx, src[0]));
            }
            /* NOTE: if relative addressing is used, we set
             * constlen in the compiler (to worst-case value)
             * since we don't know in the assembler what the max
             * addr reg value can be:
             */
            ctx->so->constlen = ctx->s->num_uniforms;
        }
        break;
    case nir_intrinsic_load_ubo:
        emit_intrinsic_load_ubo(ctx, intr, dst);
        break;
    case nir_intrinsic_load_input:
        idx = nir_intrinsic_base(intr);
        const_offset = nir_src_as_const_value(intr->src[0]);
        if (const_offset) {
            idx += const_offset->u32[0];
            for (int i = 0; i < intr->num_components; i++) {
                unsigned n = idx * 4 + i;
                dst[i] = ctx->ir->inputs[n];
            }
        } else {
            src = get_src(ctx, &intr->src[0]);
            struct ir3_instruction *collect =
                    create_collect(b, ctx->ir->inputs, ctx->ir->ninputs);
            struct ir3_instruction *addr = get_addr(ctx, src[0]);
            for (int i = 0; i < intr->num_components; i++) {
                unsigned n = idx * 4 + i;
                dst[i] = create_indirect_load(ctx, ctx->ir->ninputs,
                        n, addr, collect);
            }
        }
        break;
    case nir_intrinsic_load_var:
        emit_intrinsic_load_var(ctx, intr, dst);
        break;
    case nir_intrinsic_store_var:
        emit_intrinsic_store_var(ctx, intr);
        break;
    case nir_intrinsic_load_ssbo:
        emit_intrinsic_load_ssbo(ctx, intr, dst);
        break;
    case nir_intrinsic_store_ssbo:
        emit_intrinsic_store_ssbo(ctx, intr);
        break;
    case nir_intrinsic_ssbo_atomic_add:
    case nir_intrinsic_ssbo_atomic_imin:
    case nir_intrinsic_ssbo_atomic_umin:
    case nir_intrinsic_ssbo_atomic_imax:
    case nir_intrinsic_ssbo_atomic_umax:
    case nir_intrinsic_ssbo_atomic_and:
    case nir_intrinsic_ssbo_atomic_or:
    case nir_intrinsic_ssbo_atomic_xor:
    case nir_intrinsic_ssbo_atomic_exchange:
    case nir_intrinsic_ssbo_atomic_comp_swap:
        emit_intrinsic_atomic(ctx, intr);
        break;
    case nir_intrinsic_store_output:
        idx = nir_intrinsic_base(intr);
        const_offset = nir_src_as_const_value(intr->src[1]);
        compile_assert(ctx, const_offset != NULL);
        idx += const_offset->u32[0];

        src = get_src(ctx, &intr->src[0]);
        for (int i = 0; i < intr->num_components; i++) {
            unsigned n = idx * 4 + i;
            ctx->ir->outputs[n] = src[i];
        }
        break;
    case nir_intrinsic_load_base_vertex:
        if (!ctx->basevertex) {
            ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE);
            add_sysval_input(ctx, SYSTEM_VALUE_BASE_VERTEX,
                    ctx->basevertex);
        }
        dst[0] = ctx->basevertex;
        break;
    case nir_intrinsic_load_vertex_id_zero_base:
    case nir_intrinsic_load_vertex_id:
        if (!ctx->vertex_id) {
            gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ?
                    SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
            ctx->vertex_id = create_input(b, 0);
            add_sysval_input(ctx, sv, ctx->vertex_id);
        }
        dst[0] = ctx->vertex_id;
        break;
    case nir_intrinsic_load_instance_id:
        if (!ctx->instance_id) {
            ctx->instance_id = create_input(b, 0);
            add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID,
                    ctx->instance_id);
        }
        dst[0] = ctx->instance_id;
        break;
    case nir_intrinsic_load_user_clip_plane:
        idx = nir_intrinsic_ucp_id(intr);
        for (int i = 0; i < intr->num_components; i++) {
            unsigned n = idx * 4 + i;
            dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
        }
        break;
    case nir_intrinsic_load_front_face:
        if (!ctx->frag_face) {
            ctx->so->frag_face = true;
            ctx->frag_face = create_input(b, 0);
            ctx->frag_face->regs[0]->flags |= IR3_REG_HALF;
        }
        /* for fragface, we always get -1 or 0, but that is the inverse
         * of what nir expects (where ~0 is true).  Unfortunately,
         * trying to widen from half to full in add.s seems to do a
         * non-sign-extending widen (resulting in something that
         * gets interpreted as float Inf??)
         */
        dst[0] = ir3_COV(b, ctx->frag_face, TYPE_S16, TYPE_S32);
        dst[0] = ir3_ADD_S(b, dst[0], 0, create_immed(b, 1), 0);
        break;
    case nir_intrinsic_load_local_invocation_id:
        if (!ctx->local_invocation_id) {
            ctx->local_invocation_id = create_input_compmask(b, 0, 0x7);
            add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID,
                    0x7, ctx->local_invocation_id);
        }
        split_dest(b, dst, ctx->local_invocation_id, 0, 3);
        break;
    case nir_intrinsic_load_work_group_id:
        if (!ctx->work_group_id) {
            ctx->work_group_id = create_input_compmask(b, 0, 0x7);
            add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID,
                    0x7, ctx->work_group_id);
            ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH;
        }
        split_dest(b, dst, ctx->work_group_id, 0, 3);
        break;
    case nir_intrinsic_load_num_work_groups:
        for (int i = 0; i < intr->num_components; i++) {
            dst[i] = create_driver_param(ctx, IR3_DP_NUM_WORK_GROUPS_X + i);
        }
        break;
    case nir_intrinsic_discard_if:
    case nir_intrinsic_discard: {
        struct ir3_instruction *cond, *kill;

        if (intr->intrinsic == nir_intrinsic_discard_if) {
            /* conditional discard: */
            src = get_src(ctx, &intr->src[0]);
            cond = ir3_b2n(b, src[0]);
        } else {
            /* unconditional discard: */
            cond = create_immed(b, 1);
        }

        /* NOTE: only cmps.*.* can write p0.x: */
        cond = ir3_CMPS_S(b, cond, 0, create_immed(b, 0), 0);
        cond->cat2.condition = IR3_COND_NE;

        /* condition always goes in predicate register: */
        cond->regs[0]->num = regid(REG_P0, 0);

        kill = ir3_KILL(b, cond, 0);
        array_insert(ctx->ir, ctx->ir->predicates, kill);

        array_insert(b, b->keeps, kill);
        ctx->so->has_kill = true;

        break;
    }
    default:
        compile_error(ctx, "Unhandled intrinsic type: %s\n",
                nir_intrinsic_infos[intr->intrinsic].name);
        break;
    }
}

static void
emit_load_const(struct ir3_compile *ctx, nir_load_const_instr *instr)
{
    struct ir3_instruction **dst = get_dst_ssa(ctx, &instr->def,
            instr->def.num_components);
    for (int i = 0; i < instr->def.num_components; i++)
        dst[i] = create_immed(ctx->block, instr->value.u32[i]);
}

static void
emit_undef(struct ir3_compile *ctx, nir_ssa_undef_instr *undef)
{
    struct ir3_instruction **dst = get_dst_ssa(ctx, &undef->def,
            undef->def.num_components);
    /* backend doesn't want undefined instructions, so just plug
     * in 0.0..
     */
    for (int i = 0; i < undef->def.num_components; i++)
        dst[i] = create_immed(ctx->block, fui(0.0));
}

/*
 * texture fetch/sample instructions:
 */

static void
tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
{
    unsigned coords, flags = 0;

    /* note: would use tex->coord_components.. except txs.. also,
     * since array index goes after shadow ref, we don't want to
     * count it:
     */
    switch (tex->sampler_dim) {
    case GLSL_SAMPLER_DIM_1D:
    case GLSL_SAMPLER_DIM_BUF:
        coords = 1;
        break;
    case GLSL_SAMPLER_DIM_2D:
    case GLSL_SAMPLER_DIM_RECT:
    case GLSL_SAMPLER_DIM_EXTERNAL:
    case GLSL_SAMPLER_DIM_MS:
        coords = 2;
        break;
    case GLSL_SAMPLER_DIM_3D:
    case GLSL_SAMPLER_DIM_CUBE:
        coords = 3;
        flags |= IR3_INSTR_3D;
        break;
    default:
        unreachable("bad sampler_dim");
    }

    if (tex->is_shadow && tex->op != nir_texop_lod)
        flags |= IR3_INSTR_S;

    if (tex->is_array && tex->op != nir_texop_lod)
        flags |= IR3_INSTR_A;

    *flagsp = flags;
    *coordsp = coords;
}

static void
emit_tex(struct ir3_compile *ctx, nir_tex_instr *tex)
{
    struct ir3_block *b = ctx->block;
    struct ir3_instruction **dst, *sam, *src0[12], *src1[4];
    struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
    struct ir3_instruction *lod, *compare, *proj;
    bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
    unsigned i, coords, flags;
    unsigned nsrc0 = 0, nsrc1 = 0;
    type_t type;
    opc_t opc = 0;

    coord = off = ddx = ddy = NULL;
    lod = proj = compare = NULL;

    /* TODO: might just be one component for gathers? */
    dst = get_dst(ctx, &tex->dest, 4);

    for (unsigned i = 0; i < tex->num_srcs; i++) {
        switch (tex->src[i].src_type) {
        case nir_tex_src_coord:
            coord = get_src(ctx, &tex->src[i].src);
            break;
        case nir_tex_src_bias:
            lod = get_src(ctx, &tex->src[i].src)[0];
            has_bias = true;
            break;
        case nir_tex_src_lod:
            lod = get_src(ctx, &tex->src[i].src)[0];
            has_lod = true;
            break;
        case nir_tex_src_comparator: /* shadow comparator */
            compare = get_src(ctx, &tex->src[i].src)[0];
            break;
        case nir_tex_src_projector:
            proj = get_src(ctx, &tex->src[i].src)[0];
            has_proj = true;
            break;
        case nir_tex_src_offset:
            off = get_src(ctx, &tex->src[i].src);
            has_off = true;
            break;
        case nir_tex_src_ddx:
            ddx = get_src(ctx, &tex->src[i].src);
            break;
        case nir_tex_src_ddy:
            ddy = get_src(ctx, &tex->src[i].src);
            break;
        default:
            compile_error(ctx, "Unhandled NIR tex src type: %d\n",
                    tex->src[i].src_type);
            return;
        }
    }

    switch (tex->op) {
    case nir_texop_tex:    opc = OPC_SAM;     break;
    case nir_texop_txb:    opc = OPC_SAMB;    break;
    case nir_texop_txl:    opc = OPC_SAML;    break;
    case nir_texop_txd:    opc = OPC_SAMGQ;   break;
    case nir_texop_txf:    opc = OPC_ISAML;   break;
    case nir_texop_lod:    opc = OPC_GETLOD;  break;
    case nir_texop_txf_ms:
    case nir_texop_txs:
    case nir_texop_tg4:
    case nir_texop_query_levels:
    case nir_texop_texture_samples:
    case nir_texop_samples_identical:
    case nir_texop_txf_ms_mcs:
        compile_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
        return;
    }

    tex_info(tex, &flags, &coords);

    /*
     * lay out the first argument in the proper order:
     *  - actual coordinates first
     *  - shadow reference
     *  - array index
     *  - projection w
     *  - starting at offset 4, dpdx.xy, dpdy.xy
     *
     * bias/lod go into the second arg
     */

    /* insert tex coords: */
    for (i = 0; i < coords; i++)
        src0[i] = coord[i];

    nsrc0 = i;

    /* scale up integer coords for TXF based on the LOD */
    if (ctx->unminify_coords && (opc == OPC_ISAML)) {
        assert(has_lod);
        for (i = 0; i < coords; i++)
            src0[i] = ir3_SHL_B(b, src0[i], 0, lod, 0);
    }

    if (coords == 1) {
        /* hw doesn't do 1d, so we treat it as 2d with
         * height of 1, and patch up the y coord.
         * TODO: y coord should be (int)0 in some cases..
         */
        src0[nsrc0++] = create_immed(b, fui(0.5));
    }

    if (tex->is_shadow && tex->op != nir_texop_lod)
        src0[nsrc0++] = compare;

    if (tex->is_array && tex->op != nir_texop_lod) {
        struct ir3_instruction *idx = coord[coords];

        /* the array coord for cube arrays needs 0.5 added to it */
        if (ctx->array_index_add_half && (opc != OPC_ISAML))
            idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);

        src0[nsrc0++] = idx;
    }

    if (has_proj) {
        src0[nsrc0++] = proj;
        flags |= IR3_INSTR_P;
    }

    /* pad to 4, then ddx/ddy: */
    if (tex->op == nir_texop_txd) {
        while (nsrc0 < 4)
            src0[nsrc0++] = create_immed(b, fui(0.0));
        for (i = 0; i < coords; i++)
            src0[nsrc0++] = ddx[i];
        if (coords < 2)
            src0[nsrc0++] = create_immed(b, fui(0.0));
        for (i = 0; i < coords; i++)
            src0[nsrc0++] = ddy[i];
        if (coords < 2)
            src0[nsrc0++] = create_immed(b, fui(0.0));
    }
1763
1764 /*
1765 * second argument (if applicable):
1766 * - offsets
1767 * - lod
1768 * - bias
1769 */
1770 if (has_off | has_lod | has_bias) {
1771 if (has_off) {
1772 for (i = 0; i < coords; i++)
1773 src1[nsrc1++] = off[i];
1774 if (coords < 2)
1775 src1[nsrc1++] = create_immed(b, fui(0.0));
1776 flags |= IR3_INSTR_O;
1777 }
1778
1779 if (has_lod | has_bias)
1780 src1[nsrc1++] = lod;
1781 }
1782
1783 switch (tex->dest_type) {
1784 case nir_type_invalid:
1785 case nir_type_float:
1786 type = TYPE_F32;
1787 break;
1788 case nir_type_int:
1789 type = TYPE_S32;
1790 break;
1791 case nir_type_uint:
1792 case nir_type_bool:
1793 type = TYPE_U32;
1794 break;
1795 default:
1796 unreachable("bad dest_type");
1797 }
1798
1799 if (opc == OPC_GETLOD)
1800 type = TYPE_U32;
1801
1802 unsigned tex_idx = tex->texture_index;
1803
1804 ctx->max_texture_index = MAX2(ctx->max_texture_index, tex_idx);
1805
1806 struct ir3_instruction *col0 = create_collect(b, src0, nsrc0);
1807 struct ir3_instruction *col1 = create_collect(b, src1, nsrc1);
1808
1809 sam = ir3_SAM(b, opc, type, TGSI_WRITEMASK_XYZW, flags,
1810 tex_idx, tex_idx, col0, col1);
1811
1812 if ((ctx->astc_srgb & (1 << tex_idx)) && !nir_tex_instr_is_query(tex)) {
1813 /* only need first 3 components: */
1814 sam->regs[0]->wrmask = 0x7;
1815 split_dest(b, dst, sam, 0, 3);
1816
1817 /* we need to sample the alpha separately with a non-ASTC
1818 * texture state:
1819 */
1820 sam = ir3_SAM(b, opc, type, TGSI_WRITEMASK_W, flags,
1821 tex_idx, tex_idx, col0, col1);
1822
1823 array_insert(ctx->ir, ctx->ir->astc_srgb, sam);
1824
1825 /* fixup .w component: */
1826 split_dest(b, &dst[3], sam, 3, 1);
1827 } else {
1828 /* normal (non-workaround) case: */
1829 split_dest(b, dst, sam, 0, 4);
1830 }
1831
1832 /* GETLOD returns results in 4.8 fixed point */
1833 if (opc == OPC_GETLOD) {
1834 struct ir3_instruction *factor = create_immed(b, fui(1.0 / 256));
1835
1836 compile_assert(ctx, tex->dest_type == nir_type_float);
1837 for (i = 0; i < 2; i++) {
1838 dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_U32, TYPE_F32), 0,
1839 factor, 0);
1840 }
1841 }
1842 }
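
/* A minimal sketch (toy helper, not driver API) of the GETLOD fixup
 * above: the hw returns the LOD as 4.8 fixed point, so the raw u32
 * result is converted to float and scaled by 1/256 (assumes <stdint.h>
 * is available):
 */
static inline float
unpack_lod_4_8(uint32_t fixed)
{
	/* 8 fractional bits, e.g. 0x180 -> 1.5 */
	return (float)fixed * (1.0f / 256.0f);
}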
1843
1844 static void
1845 emit_tex_query_levels(struct ir3_compile *ctx, nir_tex_instr *tex)
1846 {
1847 struct ir3_block *b = ctx->block;
1848 struct ir3_instruction **dst, *sam;
1849
1850 dst = get_dst(ctx, &tex->dest, 1);
1851
1852 sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, TGSI_WRITEMASK_Z, 0,
1853 tex->texture_index, tex->texture_index, NULL, NULL);
1854
1855 /* even though there is only one component, we still need a
1856 * split_dest() since the value ends up in .z rather than .x
1857 */
1858 split_dest(b, dst, sam, 0, 3);
1859
1860 /* The # of levels comes from getinfo.z. We need to add 1 to it, since
1861 * the value in TEX_CONST_0 is zero-based.
1862 */
1863 if (ctx->levels_add_one)
1864 dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);
1865 }
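
/* A minimal sketch (toy helper) of the levels_add_one fixup above: on
 * gens where TEX_CONST_0 holds a zero-based miplevel count, the value
 * from getinfo.z must be incremented to get the API-visible count:
 */
static inline unsigned
api_level_count(unsigned getinfo_z, bool zero_based)
{
	return zero_based ? getinfo_z + 1 : getinfo_z;
}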
1866
1867 static void
1868 emit_tex_txs(struct ir3_compile *ctx, nir_tex_instr *tex)
1869 {
1870 struct ir3_block *b = ctx->block;
1871 struct ir3_instruction **dst, *sam;
1872 struct ir3_instruction *lod;
1873 unsigned flags, coords;
1874
1875 tex_info(tex, &flags, &coords);
1876
1877 /* Actually we want the number of dimensions, not coordinates. This
1878 * distinction only matters for cubes.
1879 */
1880 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
1881 coords = 2;
1882
1883 dst = get_dst(ctx, &tex->dest, 4);
1884
1885 compile_assert(ctx, tex->num_srcs == 1);
1886 compile_assert(ctx, tex->src[0].src_type == nir_tex_src_lod);
1887
1888 lod = get_src(ctx, &tex->src[0].src)[0];
1889
1890 sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, TGSI_WRITEMASK_XYZW, flags,
1891 tex->texture_index, tex->texture_index, lod, NULL);
1892
1893 split_dest(b, dst, sam, 0, 4);
1894
1895 /* Array size actually ends up in .w rather than .z. This doesn't
1896 * matter for miplevel 0, but for higher mips the value in z is
1897 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
1898 * returned, which means that we have to add 1 to it for arrays.
1899 */
1900 if (tex->is_array) {
1901 if (ctx->levels_add_one) {
1902 dst[coords] = ir3_ADD_U(b, dst[3], 0, create_immed(b, 1), 0);
1903 } else {
1904 dst[coords] = ir3_MOV(b, dst[3], TYPE_U32);
1905 }
1906 }
1907 }
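
/* A minimal sketch (toy code) of the array-size remap above: getsize
 * returns the layer count in .w, but the API expects it right after the
 * (possibly cube-collapsed) dims, so it is copied from index 3 to index
 * 'coords', with a +1 fixup on gens where the hw value is zero-based:
 */
static void
remap_array_size(unsigned dst[4], unsigned coords, bool zero_based)
{
	dst[coords] = dst[3] + (zero_based ? 1 : 0);
}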
1908
1909 static void
1910 emit_phi(struct ir3_compile *ctx, nir_phi_instr *nphi)
1911 {
1912 struct ir3_instruction *phi, **dst;
1913
1914 /* NOTE: phi's should be lowered to scalar at this point */
1915 compile_assert(ctx, nphi->dest.ssa.num_components == 1);
1916
1917 dst = get_dst(ctx, &nphi->dest, 1);
1918
1919 phi = ir3_instr_create2(ctx->block, OPC_META_PHI,
1920 1 + exec_list_length(&nphi->srcs));
1921 ir3_reg_create(phi, 0, 0); /* dst */
1922 phi->phi.nphi = nphi;
1923
1924 dst[0] = phi;
1925 }
1926
1927 /* phi instructions are left partially constructed. We don't resolve
1928 * their srcs until all blocks have been emitted, since (eg. with
1929 * loops) one of the phi's srcs might be defined after the phi due to
1930 * back edges in the CFG.
1931 */
1932 static void
1933 resolve_phis(struct ir3_compile *ctx, struct ir3_block *block)
1934 {
1935 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
1936 nir_phi_instr *nphi;
1937
1938 /* phi's only come at start of block: */
1939 if (instr->opc != OPC_META_PHI)
1940 break;
1941
1942 if (!instr->phi.nphi)
1943 break;
1944
1945 nphi = instr->phi.nphi;
1946 instr->phi.nphi = NULL;
1947
1948 foreach_list_typed(nir_phi_src, nsrc, node, &nphi->srcs) {
1949 struct ir3_instruction *src = get_src(ctx, &nsrc->src)[0];
1950
1951 /* NOTE: src might not be defined in the block the phi says
1952 * it comes from.. but in the end the backend assumes it can
1953 * assign the same register to each src (which only works if
1954 * the value is assigned in the src block), so insert an
1955 * extra mov to make sure the phi src is assigned in the
1956 * block it comes from:
1957 */
1958 src = ir3_MOV(get_block(ctx, nsrc->pred), src, TYPE_U32);
1959
1960 ir3_reg_create(instr, 0, IR3_REG_SSA)->instr = src;
1961 }
1962 }
1963 }
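
/* A toy sketch (not the real ir3 types) of the create-then-patch pattern
 * used for phis above: operand slots exist from creation, but are only
 * filled in once every block (and so every def, including ones reached
 * via back edges) has been emitted:
 */
struct toy_node;

struct toy_phi {
	unsigned nsrcs;             /* <= 4 in this toy version */
	struct toy_node *srcs[4];   /* NULL at creation, patched later */
};

static void
toy_resolve_phi(struct toy_phi *phi, struct toy_node *defs[])
{
	for (unsigned i = 0; (i < phi->nsrcs) && (i < 4); i++)
		phi->srcs[i] = defs[i];  /* safe now: all defs exist */
}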
1964
1965 static void
1966 emit_jump(struct ir3_compile *ctx, nir_jump_instr *jump)
1967 {
1968 switch (jump->type) {
1969 case nir_jump_break:
1970 case nir_jump_continue:
1971 /* I *think* we can simply ignore this, and use the
1972 * successor block link to figure out where we need to
1973 * jump to for break/continue
1974 */
1975 break;
1976 default:
1977 compile_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
1978 break;
1979 }
1980 }
1981
1982 static void
1983 emit_instr(struct ir3_compile *ctx, nir_instr *instr)
1984 {
1985 switch (instr->type) {
1986 case nir_instr_type_alu:
1987 emit_alu(ctx, nir_instr_as_alu(instr));
1988 break;
1989 case nir_instr_type_intrinsic:
1990 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
1991 break;
1992 case nir_instr_type_load_const:
1993 emit_load_const(ctx, nir_instr_as_load_const(instr));
1994 break;
1995 case nir_instr_type_ssa_undef:
1996 emit_undef(ctx, nir_instr_as_ssa_undef(instr));
1997 break;
1998 case nir_instr_type_tex: {
1999 nir_tex_instr *tex = nir_instr_as_tex(instr);
2000 /* a couple of tex instructions get special-cased:
2001 */
2002 switch (tex->op) {
2003 case nir_texop_txs:
2004 emit_tex_txs(ctx, tex);
2005 break;
2006 case nir_texop_query_levels:
2007 emit_tex_query_levels(ctx, tex);
2008 break;
2009 default:
2010 emit_tex(ctx, tex);
2011 break;
2012 }
2013 break;
2014 }
2015 case nir_instr_type_phi:
2016 emit_phi(ctx, nir_instr_as_phi(instr));
2017 break;
2018 case nir_instr_type_jump:
2019 emit_jump(ctx, nir_instr_as_jump(instr));
2020 break;
2021 case nir_instr_type_call:
2022 case nir_instr_type_parallel_copy:
2023 compile_error(ctx, "Unhandled NIR instruction type: %d\n", instr->type);
2024 break;
2025 }
2026 }
2027
2028 static struct ir3_block *
2029 get_block(struct ir3_compile *ctx, nir_block *nblock)
2030 {
2031 struct ir3_block *block;
2032 struct hash_entry *entry;
2033 entry = _mesa_hash_table_search(ctx->block_ht, nblock);
2034 if (entry)
2035 return entry->data;
2036
2037 block = ir3_block_create(ctx->ir);
2038 block->nblock = nblock;
2039 _mesa_hash_table_insert(ctx->block_ht, nblock, block);
2040
2041 return block;
2042 }
2043
2044 static void
2045 emit_block(struct ir3_compile *ctx, nir_block *nblock)
2046 {
2047 struct ir3_block *block = get_block(ctx, nblock);
2048
2049 for (int i = 0; i < ARRAY_SIZE(block->successors); i++) {
2050 if (nblock->successors[i]) {
2051 block->successors[i] =
2052 get_block(ctx, nblock->successors[i]);
2053 }
2054 }
2055
2056 ctx->block = block;
2057 list_addtail(&block->node, &ctx->ir->block_list);
2058
2059 /* re-emit addr register in each block if needed: */
2060 _mesa_hash_table_destroy(ctx->addr_ht, NULL);
2061 ctx->addr_ht = NULL;
2062
2063 nir_foreach_instr(instr, nblock) {
2064 emit_instr(ctx, instr);
2065 if (ctx->error)
2066 return;
2067 }
2068 }
2069
2070 static void emit_cf_list(struct ir3_compile *ctx, struct exec_list *list);
2071
2072 static void
2073 emit_if(struct ir3_compile *ctx, nir_if *nif)
2074 {
2075 struct ir3_instruction *condition = get_src(ctx, &nif->condition)[0];
2076
2077 ctx->block->condition =
2078 get_predicate(ctx, ir3_b2n(condition->block, condition));
2079
2080 emit_cf_list(ctx, &nif->then_list);
2081 emit_cf_list(ctx, &nif->else_list);
2082 }
2083
2084 static void
2085 emit_loop(struct ir3_compile *ctx, nir_loop *nloop)
2086 {
2087 emit_cf_list(ctx, &nloop->body);
2088 }
2089
2090 static void
2091 emit_cf_list(struct ir3_compile *ctx, struct exec_list *list)
2092 {
2093 foreach_list_typed(nir_cf_node, node, node, list) {
2094 switch (node->type) {
2095 case nir_cf_node_block:
2096 emit_block(ctx, nir_cf_node_as_block(node));
2097 break;
2098 case nir_cf_node_if:
2099 emit_if(ctx, nir_cf_node_as_if(node));
2100 break;
2101 case nir_cf_node_loop:
2102 emit_loop(ctx, nir_cf_node_as_loop(node));
2103 break;
2104 case nir_cf_node_function:
2105 compile_error(ctx, "TODO\n");
2106 break;
2107 }
2108 }
2109 }
2110
2111 /* emit stream-out code. At this point, the current block is the original
2112 * (nir) end block, and nir ensures that all flow control paths terminate
2113 * into the end block. We re-purpose the original end block to generate
2114 * the 'if (vtxcnt < maxvtxcnt)' condition, then append the conditional
2115 * block holding stream-out write instructions, followed by the new end
2116 * block:
2117 *
2118 * blockOrigEnd {
2119 * p0.x = (vtxcnt < maxvtxcnt)
2120 * // succs: blockStreamOut, blockNewEnd
2121 * }
2122 * blockStreamOut {
2123 * ... stream-out instructions ...
2124 * // succs: blockNewEnd
2125 * }
2126 * blockNewEnd {
2127 * }
2128 */
2129 static void
2130 emit_stream_out(struct ir3_compile *ctx)
2131 {
2132 struct ir3_shader_variant *v = ctx->so;
2133 struct ir3 *ir = ctx->ir;
2134 struct pipe_stream_output_info *strmout =
2135 &ctx->so->shader->stream_output;
2136 struct ir3_block *orig_end_block, *stream_out_block, *new_end_block;
2137 struct ir3_instruction *vtxcnt, *maxvtxcnt, *cond;
2138 struct ir3_instruction *bases[PIPE_MAX_SO_BUFFERS];
2139
2140 /* create vtxcnt input in input block at top of shader,
2141 * so that it is seen as live over the entire duration
2142 * of the shader:
2143 */
2144 vtxcnt = create_input(ctx->in_block, 0);
2145 add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt);
2146
2147 maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX);
2148
2149 /* at this point, we are at the original 'end' block,
2150 * re-purpose this block to stream-out condition, then
2151 * append stream-out block and new-end block
2152 */
2153 orig_end_block = ctx->block;
2154
2155 stream_out_block = ir3_block_create(ir);
2156 list_addtail(&stream_out_block->node, &ir->block_list);
2157
2158 new_end_block = ir3_block_create(ir);
2159 list_addtail(&new_end_block->node, &ir->block_list);
2160
2161 orig_end_block->successors[0] = stream_out_block;
2162 orig_end_block->successors[1] = new_end_block;
2163 stream_out_block->successors[0] = new_end_block;
2164
2165 /* setup 'if (vtxcnt < maxvtxcnt)' condition: */
2166 cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0);
2167 cond->regs[0]->num = regid(REG_P0, 0);
2168 cond->cat2.condition = IR3_COND_LT;
2169
2170 /* the condition goes on the block preceding the conditional,
2171 * since it is used to pick which of the two successor
2172 * paths to take:
2173 */
2174 orig_end_block->condition = cond;
2175
2176 /* switch to stream_out_block to generate the stream-out
2177 * instructions:
2178 */
2179 ctx->block = stream_out_block;
2180
2181 /* Calculate base addresses based on vtxcnt. Instructions
2182 * generated for bases not used in the following loop will be
2183 * stripped out in the backend.
2184 */
2185 for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
2186 unsigned stride = strmout->stride[i];
2187 struct ir3_instruction *base, *off;
2188
2189 base = create_uniform(ctx, regid(v->constbase.tfbo, i));
2190
2191 /* 24-bit should be enough: */
2192 off = ir3_MUL_U(ctx->block, vtxcnt, 0,
2193 create_immed(ctx->block, stride * 4), 0);
2194
2195 bases[i] = ir3_ADD_S(ctx->block, off, 0, base, 0);
2196 }
2197
2198 /* Generate the per-output store instructions: */
2199 for (unsigned i = 0; i < strmout->num_outputs; i++) {
2200 for (unsigned j = 0; j < strmout->output[i].num_components; j++) {
2201 unsigned c = j + strmout->output[i].start_component;
2202 struct ir3_instruction *base, *out, *stg;
2203
2204 base = bases[strmout->output[i].output_buffer];
2205 out = ctx->ir->outputs[regid(strmout->output[i].register_index, c)];
2206
2207 stg = ir3_STG(ctx->block, base, 0, out, 0,
2208 create_immed(ctx->block, 1), 0);
2209 stg->cat6.type = TYPE_U32;
2210 stg->cat6.dst_offset = (strmout->output[i].dst_offset + j) * 4;
2211
2212 array_insert(ctx->block, ctx->block->keeps, stg);
2213 }
2214 }
2215
2216 /* and finally switch to the new_end_block: */
2217 ctx->block = new_end_block;
2218 }
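
/* A minimal sketch (toy helper) of the store address computed above:
 * each buffer base is advanced by vtxcnt * stride bytes, and component
 * 'comp' of an output lands (dst_offset + comp) dwords past that:
 */
static inline unsigned
so_store_addr(unsigned base, unsigned vtxcnt, unsigned stride_dwords,
		unsigned dst_offset_dwords, unsigned comp)
{
	return base + (vtxcnt * stride_dwords * 4) +
			((dst_offset_dwords + comp) * 4);
}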
2219
2220 static void
2221 emit_function(struct ir3_compile *ctx, nir_function_impl *impl)
2222 {
2223 nir_metadata_require(impl, nir_metadata_block_index);
2224
2225 emit_cf_list(ctx, &impl->body);
2226 emit_block(ctx, impl->end_block);
2227
2228 /* at this point, we should have a single empty block,
2229 * into which we emit the 'end' instruction.
2230 */
2231 compile_assert(ctx, list_empty(&ctx->block->instr_list));
2232
2233 /* If stream-out (aka transform-feedback) enabled, emit the
2234 * stream-out instructions, followed by a new empty block (into
2235 * which the 'end' instruction lands).
2236 *
2237 * NOTE: it is done in this order, rather than inserting before
2238 * we emit end_block, because NIR guarantees that all blocks
2239 * flow into end_block, and that end_block has no successors.
2240 * So by re-purposing end_block as the first block of stream-
2241 * out, we guarantee that all exit paths flow into the stream-
2242 * out instructions.
2243 */
2244 if ((ctx->compiler->gpu_id < 500) &&
2245 (ctx->so->shader->stream_output.num_outputs > 0) &&
2246 !ctx->so->key.binning_pass) {
2247 debug_assert(ctx->so->type == SHADER_VERTEX);
2248 emit_stream_out(ctx);
2249 }
2250
2251 ir3_END(ctx->block);
2252 }
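
/* A sketch (toy predicate mirroring the guard above) of when shader-
 * emitted stream-out is needed: only pre-a5xx gens use this path (later
 * gens handle stream-out in hw), and the binning pass never needs it:
 */
static inline bool
needs_shader_streamout(unsigned gpu_id, unsigned num_so_outputs,
		bool binning_pass)
{
	return (gpu_id < 500) && (num_so_outputs > 0) && !binning_pass;
}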
2253
2254 static void
2255 setup_input(struct ir3_compile *ctx, nir_variable *in)
2256 {
2257 struct ir3_shader_variant *so = ctx->so;
2258 unsigned array_len = MAX2(glsl_get_length(in->type), 1);
2259 unsigned ncomp = glsl_get_components(in->type);
2260 unsigned n = in->data.driver_location;
2261 unsigned slot = in->data.location;
2262
2263 DBG("; in: slot=%u, len=%ux%u, drvloc=%u",
2264 slot, array_len, ncomp, n);
2265
2266 /* let's pretend things other than vec4 don't exist: */
2267 ncomp = MAX2(ncomp, 4);
2268 compile_assert(ctx, ncomp == 4);
2269
2270 so->inputs[n].slot = slot;
2271 so->inputs[n].compmask = (1 << ncomp) - 1;
2272 so->inputs_count = MAX2(so->inputs_count, n + 1);
2273 so->inputs[n].interpolate = in->data.interpolation;
2274
2275 if (ctx->so->type == SHADER_FRAGMENT) {
2276 for (int i = 0; i < ncomp; i++) {
2277 struct ir3_instruction *instr = NULL;
2278 unsigned idx = (n * 4) + i;
2279
2280 if (slot == VARYING_SLOT_POS) {
2281 so->inputs[n].bary = false;
2282 so->frag_coord = true;
2283 instr = create_frag_coord(ctx, i);
2284 } else if (slot == VARYING_SLOT_PNTC) {
2285 /* see for example st_get_generic_varying_index().. this is
2286 * maybe a bit mesa/st specific. But we need things to line
2287 * up for this in fdN_program:
2288 * unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0);
2289 * if (emit->sprite_coord_enable & texmask) {
2290 * ...
2291 * }
2292 */
2293 so->inputs[n].slot = VARYING_SLOT_VAR8;
2294 so->inputs[n].bary = true;
2295 instr = create_frag_input(ctx, false);
2296 } else {
2297 bool use_ldlv = false;
2298
2299 /* detect the special case for front/back colors where
2300 * we need to do flat vs smooth shading depending on
2301 * rast state:
2302 */
2303 if (in->data.interpolation == INTERP_MODE_NONE) {
2304 switch (slot) {
2305 case VARYING_SLOT_COL0:
2306 case VARYING_SLOT_COL1:
2307 case VARYING_SLOT_BFC0:
2308 case VARYING_SLOT_BFC1:
2309 so->inputs[n].rasterflat = true;
2310 break;
2311 default:
2312 break;
2313 }
2314 }
2315
2316 if (ctx->flat_bypass) {
2317 if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) ||
2318 (so->inputs[n].rasterflat && ctx->so->key.rasterflat))
2319 use_ldlv = true;
2320 }
2321
2322 so->inputs[n].bary = true;
2323
2324 instr = create_frag_input(ctx, use_ldlv);
2325 }
2326
2327 compile_assert(ctx, idx < ctx->ir->ninputs);
2328
2329 ctx->ir->inputs[idx] = instr;
2330 }
2331 } else if (ctx->so->type == SHADER_VERTEX) {
2332 for (int i = 0; i < ncomp; i++) {
2333 unsigned idx = (n * 4) + i;
2334 compile_assert(ctx, idx < ctx->ir->ninputs);
2335 ctx->ir->inputs[idx] = create_input(ctx->block, idx);
2336 }
2337 } else {
2338 compile_error(ctx, "unknown shader type: %d\n", ctx->so->type);
2339 }
2340
2341 if (so->inputs[n].bary || (ctx->so->type == SHADER_VERTEX)) {
2342 so->total_in += ncomp;
2343 }
2344 }
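
/* A minimal sketch (toy helper) of the "everything is a vec4" indexing
 * used by setup_input()/setup_output(): the scalar slot for a component
 * is simply drvloc * 4 + comp:
 */
static inline unsigned
scalar_slot(unsigned driver_location, unsigned comp)
{
	return (driver_location * 4) + comp;
}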
2345
2346 static void
2347 setup_output(struct ir3_compile *ctx, nir_variable *out)
2348 {
2349 struct ir3_shader_variant *so = ctx->so;
2350 unsigned array_len = MAX2(glsl_get_length(out->type), 1);
2351 unsigned ncomp = glsl_get_components(out->type);
2352 unsigned n = out->data.driver_location;
2353 unsigned slot = out->data.location;
2354 unsigned comp = 0;
2355
2356 DBG("; out: slot=%u, len=%ux%u, drvloc=%u",
2357 slot, array_len, ncomp, n);
2358
2359 /* let's pretend things other than vec4 don't exist: */
2360 ncomp = MAX2(ncomp, 4);
2361 compile_assert(ctx, ncomp == 4);
2362
2363 if (ctx->so->type == SHADER_FRAGMENT) {
2364 switch (slot) {
2365 case FRAG_RESULT_DEPTH:
2366 comp = 2; /* tgsi will write to .z component */
2367 so->writes_pos = true;
2368 break;
2369 case FRAG_RESULT_COLOR:
2370 so->color0_mrt = 1;
2371 break;
2372 default:
2373 if (slot >= FRAG_RESULT_DATA0)
2374 break;
2375 compile_error(ctx, "unknown FS output name: %s\n",
2376 gl_frag_result_name(slot));
2377 }
2378 } else if (ctx->so->type == SHADER_VERTEX) {
2379 switch (slot) {
2380 case VARYING_SLOT_POS:
2381 so->writes_pos = true;
2382 break;
2383 case VARYING_SLOT_PSIZ:
2384 so->writes_psize = true;
2385 break;
2386 case VARYING_SLOT_COL0:
2387 case VARYING_SLOT_COL1:
2388 case VARYING_SLOT_BFC0:
2389 case VARYING_SLOT_BFC1:
2390 case VARYING_SLOT_FOGC:
2391 case VARYING_SLOT_CLIP_DIST0:
2392 case VARYING_SLOT_CLIP_DIST1:
2393 case VARYING_SLOT_CLIP_VERTEX:
2394 break;
2395 default:
2396 if (slot >= VARYING_SLOT_VAR0)
2397 break;
2398 if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
2399 break;
2400 compile_error(ctx, "unknown VS output name: %s\n",
2401 gl_varying_slot_name(slot));
2402 }
2403 } else {
2404 compile_error(ctx, "unknown shader type: %d\n", ctx->so->type);
2405 }
2406
2407 compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
2408
2409 so->outputs[n].slot = slot;
2410 so->outputs[n].regid = regid(n, comp);
2411 so->outputs_count = MAX2(so->outputs_count, n + 1);
2412
2413 for (int i = 0; i < ncomp; i++) {
2414 unsigned idx = (n * 4) + i;
2415 compile_assert(ctx, idx < ctx->ir->noutputs);
2416 ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
2417 }
2418 }
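
/* A sketch of the regid() packing used above (toy reimplementation,
 * assuming the usual ir3 encoding of vec4 register number plus
 * component), e.g. gl_FragDepth lands in rN.z via regid(n, 2):
 */
static inline unsigned
toy_regid(unsigned num, unsigned comp)
{
	return (num << 2) | comp;  /* comp 0..3 == .x/.y/.z/.w */
}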
2419
2420 static int
2421 max_drvloc(struct exec_list *vars)
2422 {
2423 int drvloc = -1;
2424 nir_foreach_variable(var, vars) {
2425 drvloc = MAX2(drvloc, (int)var->data.driver_location);
2426 }
2427 return drvloc;
2428 }
2429
2430 static const unsigned max_sysvals[SHADER_MAX] = {
2431 [SHADER_VERTEX] = 16,
2432 [SHADER_COMPUTE] = 16, // TODO how many do we actually need?
2433 };
2434
2435 static void
2436 emit_instructions(struct ir3_compile *ctx)
2437 {
2438 unsigned ninputs, noutputs;
2439 nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);
2440
2441 ninputs = (max_drvloc(&ctx->s->inputs) + 1) * 4;
2442 noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4;
2443
2444 /* we need to leave room for sysvals:
2445 */
2446 ninputs += max_sysvals[ctx->so->type];
2447
2448 ctx->ir = ir3_create(ctx->compiler, ninputs, noutputs);
2449
2450 /* Create inputs in first block: */
2451 ctx->block = get_block(ctx, nir_start_block(fxn));
2452 ctx->in_block = ctx->block;
2453 list_addtail(&ctx->block->node, &ctx->ir->block_list);
2454
2455 ninputs -= max_sysvals[ctx->so->type];
2456
2457 /* for fragment shaders, we have a single input register (usually
2458 * r0.xy) which is used as the base for bary.f varying fetch instrs:
2459 */
2460 if (ctx->so->type == SHADER_FRAGMENT) {
2461 // TODO maybe a helper for fi since we need it a few places..
2462 struct ir3_instruction *instr;
2463 instr = ir3_instr_create(ctx->block, OPC_META_FI);
2464 ir3_reg_create(instr, 0, 0);
2465 ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.x */
2466 ir3_reg_create(instr, 0, IR3_REG_SSA); /* r0.y */
2467 ctx->frag_pos = instr;
2468 }
2469
2470 /* Setup inputs: */
2471 nir_foreach_variable(var, &ctx->s->inputs) {
2472 setup_input(ctx, var);
2473 }
2474
2475 /* Setup outputs: */
2476 nir_foreach_variable(var, &ctx->s->outputs) {
2477 setup_output(ctx, var);
2478 }
2479
2480 /* Setup global variables (which should only be arrays): */
2481 nir_foreach_variable(var, &ctx->s->globals) {
2482 declare_var(ctx, var);
2483 }
2484
2485 /* Setup local variables (which should only be arrays): */
2486 /* NOTE: need to do something more clever when we support >1 fxn */
2487 nir_foreach_variable(var, &fxn->locals) {
2488 declare_var(ctx, var);
2489 }
2490
2491 /* And emit the body: */
2492 ctx->impl = fxn;
2493 emit_function(ctx, fxn);
2494
2495 list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
2496 resolve_phis(ctx, block);
2497 }
2498 }
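
/* A minimal sketch (toy helper) of the input-array sizing above: inputs
 * are scalarized into vec4 slots, with extra room left for sysvals:
 */
static inline unsigned
scalar_input_count(int max_driver_location, unsigned num_sysvals)
{
	return ((unsigned)(max_driver_location + 1) * 4) + num_sysvals;
}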
2499
2500 /* from the NIR perspective, we actually have inputs. But most of the
2501 * "inputs" for a fragment shader are just bary.f instructions. The
2502 * *actual* inputs from the hw perspective are the frag_pos and
2503 * optionally frag_coord and frag_face.
2504 */
2505 static void
2506 fixup_frag_inputs(struct ir3_compile *ctx)
2507 {
2508 struct ir3_shader_variant *so = ctx->so;
2509 struct ir3 *ir = ctx->ir;
2510 struct ir3_instruction **inputs;
2511 struct ir3_instruction *instr;
2512 int n, regid = 0;
2513
2514 ir->ninputs = 0;
2515
2516 n = 4; /* always have frag_pos */
2517 n += COND(so->frag_face, 4);
2518 n += COND(so->frag_coord, 4);
2519
2520 inputs = ir3_alloc(ctx->ir, n * (sizeof(struct ir3_instruction *)));
2521
2522 if (so->frag_face) {
2523 /* this ultimately gets assigned to hr0.x so doesn't conflict
2524 * with frag_coord/frag_pos..
2525 */
2526 inputs[ir->ninputs++] = ctx->frag_face;
2527 ctx->frag_face->regs[0]->num = 0;
2528
2529 /* remaining channels not used, but let's avoid confusing
2530 * other parts that expect inputs to come in groups of vec4
2531 */
2532 inputs[ir->ninputs++] = NULL;
2533 inputs[ir->ninputs++] = NULL;
2534 inputs[ir->ninputs++] = NULL;
2535 }
2536
2537 /* since we don't know where to set the regid for frag_coord,
2538 * we have to use r0.x for it. But we don't want to *always*
2539 * use r1.x for frag_pos as that could increase the register
2540 * footprint on simple shaders:
2541 */
2542 if (so->frag_coord) {
2543 ctx->frag_coord[0]->regs[0]->num = regid++;
2544 ctx->frag_coord[1]->regs[0]->num = regid++;
2545 ctx->frag_coord[2]->regs[0]->num = regid++;
2546 ctx->frag_coord[3]->regs[0]->num = regid++;
2547
2548 inputs[ir->ninputs++] = ctx->frag_coord[0];
2549 inputs[ir->ninputs++] = ctx->frag_coord[1];
2550 inputs[ir->ninputs++] = ctx->frag_coord[2];
2551 inputs[ir->ninputs++] = ctx->frag_coord[3];
2552 }
2553
2554 /* we always have frag_pos: */
2555 so->pos_regid = regid;
2556
2557 /* r0.x */
2558 instr = create_input(ctx->in_block, ir->ninputs);
2559 instr->regs[0]->num = regid++;
2560 inputs[ir->ninputs++] = instr;
2561 ctx->frag_pos->regs[1]->instr = instr;
2562
2563 /* r0.y */
2564 instr = create_input(ctx->in_block, ir->ninputs);
2565 instr->regs[0]->num = regid++;
2566 inputs[ir->ninputs++] = instr;
2567 ctx->frag_pos->regs[2]->instr = instr;
2568
2569 ir->inputs = inputs;
2570 }
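
/* A minimal sketch (toy helper) of the hw input count computed above:
 * frag_pos always occupies one vec4 slot, and frag_face/frag_coord each
 * add another when used:
 */
static inline unsigned
fs_hw_input_count(bool has_frag_face, bool has_frag_coord)
{
	return 4 + (has_frag_face ? 4 : 0) + (has_frag_coord ? 4 : 0);
}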
2571
2572 /* Fixup tex sampler state for astc/srgb workaround instructions. We
2573 * need to assign the tex state indexes for these after we know the
2574 * max tex index.
2575 */
2576 static void
2577 fixup_astc_srgb(struct ir3_compile *ctx)
2578 {
2579 struct ir3_shader_variant *so = ctx->so;
2580 /* indexed by original tex idx, value is newly assigned alpha sampler
2581 * state tex idx. Zero is invalid since there is at least one sampler
2582 * if we get here.
2583 */
2584 unsigned alt_tex_state[16] = {0};
2585 unsigned tex_idx = ctx->max_texture_index + 1;
2586 unsigned idx = 0;
2587
2588 so->astc_srgb.base = tex_idx;
2589
2590 for (unsigned i = 0; i < ctx->ir->astc_srgb_count; i++) {
2591 struct ir3_instruction *sam = ctx->ir->astc_srgb[i];
2592
2593 compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));
2594
2595 if (alt_tex_state[sam->cat5.tex] == 0) {
2596 /* assign new alternate/alpha tex state slot: */
2597 alt_tex_state[sam->cat5.tex] = tex_idx++;
2598 so->astc_srgb.orig_idx[idx++] = sam->cat5.tex;
2599 so->astc_srgb.count++;
2600 }
2601
2602 sam->cat5.tex = alt_tex_state[sam->cat5.tex];
2603 }
2604 }
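
/* A minimal sketch (toy code) of the first-use slot assignment above:
 * each original tex index gets one alternate (alpha) tex state slot,
 * allocated the first time it is seen; zero doubles as "unassigned":
 */
static unsigned
toy_assign_alt_slot(unsigned alt[16], unsigned orig_tex, unsigned *next)
{
	if (alt[orig_tex] == 0)
		alt[orig_tex] = (*next)++;
	return alt[orig_tex];
}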
2605
2606 int
2607 ir3_compile_shader_nir(struct ir3_compiler *compiler,
2608 struct ir3_shader_variant *so)
2609 {
2610 struct ir3_compile *ctx;
2611 struct ir3 *ir;
2612 struct ir3_instruction **inputs;
2613 unsigned i, j, actual_in, inloc;
2614 int ret = 0, max_bary;
2615
2616 assert(!so->ir);
2617
2618 ctx = compile_init(compiler, so);
2619 if (!ctx) {
2620 DBG("INIT failed!");
2621 ret = -1;
2622 goto out;
2623 }
2624
2625 emit_instructions(ctx);
2626
2627 if (ctx->error) {
2628 DBG("EMIT failed!");
2629 ret = -1;
2630 goto out;
2631 }
2632
2633 ir = so->ir = ctx->ir;
2634
2635 /* keep track of the inputs from the TGSI perspective: */
2636 inputs = ir->inputs;
2637
2638 /* but fixup actual inputs for frag shader: */
2639 if (so->type == SHADER_FRAGMENT)
2640 fixup_frag_inputs(ctx);
2641
2642 /* at this point, for the binning pass, throw away unneeded outputs: */
2643 if (so->key.binning_pass) {
2644 for (i = 0, j = 0; i < so->outputs_count; i++) {
2645 unsigned slot = so->outputs[i].slot;
2646
2647 /* throw away everything but first position/psize */
2648 if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
2649 if (i != j) {
2650 so->outputs[j] = so->outputs[i];
2651 ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0];
2652 ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1];
2653 ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2];
2654 ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3];
2655 }
2656 j++;
2657 }
2658 }
2659 so->outputs_count = j;
2660 ir->noutputs = j * 4;
2661 }
2662
2663 /* if we want half-precision outputs, mark the output registers
2664 * as half:
2665 */
2666 if (so->key.half_precision) {
2667 for (i = 0; i < ir->noutputs; i++) {
2668 struct ir3_instruction *out = ir->outputs[i];
2669
2670 if (!out)
2671 continue;
2672
2673 /* if frag shader writes z, that needs to be full precision: */
2674 if (so->outputs[i/4].slot == FRAG_RESULT_DEPTH)
2675 continue;
2676
2677 out->regs[0]->flags |= IR3_REG_HALF;
2678 /* output could be a fanout (ie. texture fetch output)
2679 * in which case we need to propagate the half-reg flag
2680 * up to the definer so that RA sees it:
2681 */
2682 if (out->opc == OPC_META_FO) {
2683 out = out->regs[1]->instr;
2684 out->regs[0]->flags |= IR3_REG_HALF;
2685 }
2686
2687 if (out->opc == OPC_MOV) {
2688 out->cat1.dst_type = half_type(out->cat1.dst_type);
2689 }
2690 }
2691 }
2692
2693 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2694 printf("BEFORE CP:\n");
2695 ir3_print(ir);
2696 }
2697
2698 ir3_cp(ir, so);
2699
2700 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2701 printf("BEFORE GROUPING:\n");
2702 ir3_print(ir);
2703 }
2704
2705 /* Group left/right neighbors, inserting mov's where needed to
2706 * solve conflicts:
2707 */
2708 ir3_group(ir);
2709
2710 ir3_depth(ir);
2711
2712 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2713 printf("AFTER DEPTH:\n");
2714 ir3_print(ir);
2715 }
2716
2717 ret = ir3_sched(ir);
2718 if (ret) {
2719 DBG("SCHED failed!");
2720 goto out;
2721 }
2722
2723 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2724 printf("AFTER SCHED:\n");
2725 ir3_print(ir);
2726 }
2727
2728 ret = ir3_ra(ir, so->type, so->frag_coord, so->frag_face);
2729 if (ret) {
2730 DBG("RA failed!");
2731 goto out;
2732 }
2733
2734 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2735 printf("AFTER RA:\n");
2736 ir3_print(ir);
2737 }
2738
2739 /* fixup input/outputs: */
2740 for (i = 0; i < so->outputs_count; i++) {
2741 so->outputs[i].regid = ir->outputs[i*4]->regs[0]->num;
2742 }
2743
2744 /* Note that some or all channels of an input may be unused: */
2745 actual_in = 0;
2746 inloc = 0;
2747 for (i = 0; i < so->inputs_count; i++) {
2748 unsigned j, regid = ~0, compmask = 0, maxcomp = 0;
2749 so->inputs[i].ncomp = 0;
2750 so->inputs[i].inloc = inloc;
2751 for (j = 0; j < 4; j++) {
2752 struct ir3_instruction *in = inputs[(i*4) + j];
2753 if (in && !(in->flags & IR3_INSTR_UNUSED)) {
2754 compmask |= (1 << j);
2755 regid = in->regs[0]->num - j;
2756 actual_in++;
2757 so->inputs[i].ncomp++;
2758 if ((so->type == SHADER_FRAGMENT) && so->inputs[i].bary) {
2759 /* assign inloc: */
2760 assert(in->regs[1]->flags & IR3_REG_IMMED);
2761 in->regs[1]->iim_val = inloc + j;
2762 maxcomp = j + 1;
2763 }
2764 }
2765 }
2766 if ((so->type == SHADER_FRAGMENT) && compmask && so->inputs[i].bary) {
2767 so->varying_in++;
2768 so->inputs[i].compmask = (1 << maxcomp) - 1;
2769 inloc += maxcomp;
2770 } else {
2771 so->inputs[i].compmask = compmask;
2772 }
2773 so->inputs[i].regid = regid;
2774 }
2775
2776 if (ctx->astc_srgb)
2777 fixup_astc_srgb(ctx);
2778
2779 /* We need to run legalize after the "bary.f" offsets (inloc) have
2780 * been assigned (which only happens for frag shaders).
2781 */
2782 ir3_legalize(ir, &so->has_samp, &so->has_ssbo, &max_bary);
2783
2784 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
2785 printf("AFTER LEGALIZE:\n");
2786 ir3_print(ir);
2787 }
2788
2789 /* Note that actual_in counts inputs that are not bary.f'd for FS: */
2790 if (so->type == SHADER_VERTEX)
2791 so->total_in = actual_in;
2792 else
2793 so->total_in = max_bary + 1;
2794
2795 out:
2796 if (ret) {
2797 if (so->ir)
2798 ir3_destroy(so->ir);
2799 so->ir = NULL;
2800 }
2801 compile_free(ctx);
2802
2803 return ret;
2804 }