1 /*
2 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include <stdarg.h>
28
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
32
33 #include "ir3_compiler.h"
34 #include "ir3_shader.h"
35 #include "ir3_nir.h"
36
37 #include "instr-a3xx.h"
38 #include "ir3.h"
39
40 /* for conditionally setting boolean flag(s): */
41 #define COND(bool, val) ((bool) ? (val) : 0)
42
43 #define DBG(fmt, ...) \
44 do { debug_printf("%s:%d: "fmt "\n", \
45 __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
46
47 struct ir3_context {
48 struct ir3_compiler *compiler;
49
50 struct nir_shader *s;
51
52 struct nir_instr *cur_instr; /* current instruction, just for debug */
53
54 struct ir3 *ir;
55 struct ir3_shader_variant *so;
56
57 struct ir3_block *block; /* the current block */
58 struct ir3_block *in_block; /* block created for shader inputs */
59
60 nir_function_impl *impl;
61
62 /* For fragment shaders, varyings are not actual shader inputs;
63 * instead the hw passes a varying-coord which is used with
64 * bary.f.
65 *
66 * But NIR doesn't know that, it still declares varyings as
67 * inputs. So we do all the input tracking normally and fix
68 * things up after compile_instructions()
69 *
70 * NOTE that frag_vcoord is the hardware position (possibly it
71 * is actually an index or tag or some such.. it is *not* a
72 * value that can be directly used for gl_FragCoord..)
73 */
74 struct ir3_instruction *frag_vcoord;
75
76 /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
77 struct ir3_instruction *frag_face, *frag_coord;
78
79 /* For vertex shaders, keep track of the system value sources */
80 struct ir3_instruction *vertex_id, *basevertex, *instance_id;
81
82 /* For fragment shaders: */
83 struct ir3_instruction *samp_id, *samp_mask_in;
84
85 /* Compute shader inputs: */
86 struct ir3_instruction *local_invocation_id, *work_group_id;
87
88 /* mapping from nir_register to defining instruction: */
89 struct hash_table *def_ht;
90
91 unsigned num_arrays;
92
93 /* a common pattern for indirect addressing is to request the
94 * same address register multiple times. To avoid generating
95 * duplicate instruction sequences (which our backend does not
96 * try to clean up, since that should be done at the NIR stage)
97 * we cache the address value generated for a given src value:
98 *
99 * Note that we have to cache these per alignment, since the same
100 * src used for an array of vec1 cannot also be used for an
101 * array of vec4.
102 */
103 struct hash_table *addr_ht[4];
104
105 /* last dst array, for indirect we need to insert a var-store.
106 */
107 struct ir3_instruction **last_dst;
108 unsigned last_dst_n;
109
110 /* maps nir_block to ir3_block, mostly for the purposes of
111 * figuring out the block's successors
112 */
113 struct hash_table *block_ht;
114
115 /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
116 unsigned astc_srgb;
117
118 unsigned samples; /* bitmask of x,y sample shifts */
119
120 unsigned max_texture_index;
121
122 /* set if we encounter something we can't handle yet, so we
123 * can bail cleanly and fall back to the TGSI compiler front-end
124 */
125 bool error;
126 };
127
128 /* gpu pointer size in units of 32bit registers/slots */
129 static unsigned pointer_size(struct ir3_context *ctx)
130 {
131 return (ctx->compiler->gpu_id >= 500) ? 2 : 1;
132 }
133
134 static struct ir3_instruction * create_immed(struct ir3_block *block, uint32_t val);
135 static struct ir3_block * get_block(struct ir3_context *ctx, const nir_block *nblock);
136
137
138 static struct ir3_context *
139 compile_init(struct ir3_compiler *compiler,
140 struct ir3_shader_variant *so)
141 {
142 struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);
143
144 if (compiler->gpu_id >= 400) {
145 if (so->type == MESA_SHADER_VERTEX) {
146 ctx->astc_srgb = so->key.vastc_srgb;
147 } else if (so->type == MESA_SHADER_FRAGMENT) {
148 ctx->astc_srgb = so->key.fastc_srgb;
149 }
150
151 } else {
152 if (so->type == MESA_SHADER_VERTEX) {
153 ctx->samples = so->key.vsamples;
154 } else if (so->type == MESA_SHADER_FRAGMENT) {
155 ctx->samples = so->key.fsamples;
156 }
157 }
158
159 ctx->compiler = compiler;
160 ctx->so = so;
161 ctx->def_ht = _mesa_hash_table_create(ctx,
162 _mesa_hash_pointer, _mesa_key_pointer_equal);
163 ctx->block_ht = _mesa_hash_table_create(ctx,
164 _mesa_hash_pointer, _mesa_key_pointer_equal);
165
166 /* TODO: maybe generate some sort of bitmask of what key
167 * lowers vs what the shader has (ie. no need for texture
168 * clamp lowering if there are no texture sample instrs)..
169 * although this should be done further up the stack to avoid
170 * creating duplicate variants..
171 */
172
173 if (ir3_key_lowers_nir(&so->key)) {
174 nir_shader *s = nir_shader_clone(ctx, so->shader->nir);
175 ctx->s = ir3_optimize_nir(so->shader, s, &so->key);
176 } else {
177 /* fast-path for shader key that lowers nothing in NIR: */
178 ctx->s = so->shader->nir;
179 }
180
181 /* this needs to be the last pass run, so do this here instead of
182 * in ir3_optimize_nir():
183 */
184 NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);
185 NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);
186
187 if (ir3_shader_debug & IR3_DBG_DISASM) {
188 printf("dump nir%dv%d: type=%d, k={cts=%u,hp=%u}\n",
189 so->shader->id, so->id, so->type,
190 so->key.color_two_side, so->key.half_precision);
191 nir_print_shader(ctx->s, stdout);
192 }
193
194 if (shader_debug_enabled(so->type)) {
195 fprintf(stderr, "NIR (final form) for %s shader:\n",
196 _mesa_shader_stage_to_string(so->type));
197 nir_print_shader(ctx->s, stderr);
198 }
199
200 ir3_nir_scan_driver_consts(ctx->s, &so->const_layout);
201
202 so->num_uniforms = ctx->s->num_uniforms;
203 so->num_ubos = ctx->s->info.num_ubos;
204
205 /* Layout of constant registers, each section aligned to vec4. Note
206 * that pointer size (ubo, etc) changes depending on generation.
207 *
208 * user consts
209 * UBO addresses
210 * SSBO sizes
211 * if (vertex shader) {
212 * driver params (IR3_DP_*)
213 * if (stream_output.num_outputs > 0)
214 * stream-out addresses
215 * }
216 * immediates
217 *
218 * Immediates go last mostly because they are inserted in the CP pass
219 * after the nir -> ir3 frontend.
220 */
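/* Illustrative sketch of the layout below (the numbers are hypothetical,
 * not taken from this file): with 2 UBOs on a gpu_id >= 500 part
 * (ptrsz == 2), the UBO-address section occupies 2 * 2 = 4 dwords, i.e.
 * exactly one vec4 slot, and the following section starts on the next
 * vec4 boundary. Every section below advances constoff the same way:
 * its size in dwords, padded up to a whole vec4.
 */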
221 unsigned constoff = align(ctx->s->num_uniforms, 4);
222 unsigned ptrsz = pointer_size(ctx);
223
224 memset(&so->constbase, ~0, sizeof(so->constbase));
225
226 if (so->num_ubos > 0) {
227 so->constbase.ubo = constoff;
228 constoff += align(ctx->s->info.num_ubos * ptrsz, 4) / 4;
229 }
230
231 if (so->const_layout.ssbo_size.count > 0) {
232 unsigned cnt = so->const_layout.ssbo_size.count;
233 so->constbase.ssbo_sizes = constoff;
234 constoff += align(cnt, 4) / 4;
235 }
236
237 if (so->const_layout.image_dims.count > 0) {
238 unsigned cnt = so->const_layout.image_dims.count;
239 so->constbase.image_dims = constoff;
240 constoff += align(cnt, 4) / 4;
241 }
242
243 unsigned num_driver_params = 0;
244 if (so->type == MESA_SHADER_VERTEX) {
245 num_driver_params = IR3_DP_VS_COUNT;
246 } else if (so->type == MESA_SHADER_COMPUTE) {
247 num_driver_params = IR3_DP_CS_COUNT;
248 }
249
250 so->constbase.driver_param = constoff;
251 constoff += align(num_driver_params, 4) / 4;
252
253 if ((so->type == MESA_SHADER_VERTEX) &&
254 (compiler->gpu_id < 500) &&
255 so->shader->stream_output.num_outputs > 0) {
256 so->constbase.tfbo = constoff;
257 constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
258 }
259
260 so->constbase.immediate = constoff;
261
262 return ctx;
263 }
264
265 static void
266 compile_error(struct ir3_context *ctx, const char *format, ...)
267 {
268 struct hash_table *errors = NULL;
269 va_list ap;
270 va_start(ap, format);
271 if (ctx->cur_instr) {
272 errors = _mesa_hash_table_create(NULL,
273 _mesa_hash_pointer,
274 _mesa_key_pointer_equal);
275 char *msg = ralloc_vasprintf(errors, format, ap);
276 _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
277 } else {
278 _debug_vprintf(format, ap);
279 }
280 va_end(ap);
281 nir_print_shader_annotated(ctx->s, stdout, errors);
282 ralloc_free(errors);
283 ctx->error = true;
284 debug_assert(0);
285 }
286
287 #define compile_assert(ctx, cond) do { \
288 if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
289 } while (0)
290
291 static void
292 compile_free(struct ir3_context *ctx)
293 {
294 ralloc_free(ctx);
295 }
296
297 static void
298 declare_array(struct ir3_context *ctx, nir_register *reg)
299 {
300 struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
301 arr->id = ++ctx->num_arrays;
302 /* NOTE: sometimes we get non array regs, for example for arrays of
303 * length 1. See fs-const-array-of-struct-of-array.shader_test. So
304 * treat a non-array as if it were an array of length 1.
305 *
306 * It would be nice if there was a nir pass to convert arrays of
307 * length 1 to ssa.
308 */
309 arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
310 compile_assert(ctx, arr->length > 0);
311 arr->r = reg;
312 list_addtail(&arr->node, &ctx->ir->array_list);
313 }
314
315 static struct ir3_array *
316 get_array(struct ir3_context *ctx, nir_register *reg)
317 {
318 list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
319 if (arr->r == reg)
320 return arr;
321 }
322 compile_error(ctx, "bogus reg: %s\n", reg->name);
323 return NULL;
324 }
325
326 /* relative (indirect) if address!=NULL */
327 static struct ir3_instruction *
328 create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
329 struct ir3_instruction *address)
330 {
331 struct ir3_block *block = ctx->block;
332 struct ir3_instruction *mov;
333 struct ir3_register *src;
334
335 mov = ir3_instr_create(block, OPC_MOV);
336 mov->cat1.src_type = TYPE_U32;
337 mov->cat1.dst_type = TYPE_U32;
338 mov->barrier_class = IR3_BARRIER_ARRAY_R;
339 mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
340 ir3_reg_create(mov, 0, 0);
341 src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
342 COND(address, IR3_REG_RELATIV));
343 src->instr = arr->last_write;
344 src->size = arr->length;
345 src->array.id = arr->id;
346 src->array.offset = n;
347
348 if (address)
349 ir3_instr_set_address(mov, address);
350
351 return mov;
352 }
353
354 /* relative (indirect) if address!=NULL */
355 static void
356 create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
357 struct ir3_instruction *src, struct ir3_instruction *address)
358 {
359 struct ir3_block *block = ctx->block;
360 struct ir3_instruction *mov;
361 struct ir3_register *dst;
362
363 /* if not relative store, don't create an extra mov, since that
364 * ends up being difficult for cp to remove.
365 */
366 if (!address) {
367 dst = src->regs[0];
368
369 src->barrier_class |= IR3_BARRIER_ARRAY_W;
370 src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
371
372 dst->flags |= IR3_REG_ARRAY;
373 dst->instr = arr->last_write;
374 dst->size = arr->length;
375 dst->array.id = arr->id;
376 dst->array.offset = n;
377
378 arr->last_write = src;
379
380 array_insert(block, block->keeps, src);
381
382 return;
383 }
384
385 mov = ir3_instr_create(block, OPC_MOV);
386 mov->cat1.src_type = TYPE_U32;
387 mov->cat1.dst_type = TYPE_U32;
388 mov->barrier_class = IR3_BARRIER_ARRAY_W;
389 mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
390 dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
391 COND(address, IR3_REG_RELATIV));
392 dst->instr = arr->last_write;
393 dst->size = arr->length;
394 dst->array.id = arr->id;
395 dst->array.offset = n;
396 ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;
397
398 if (address)
399 ir3_instr_set_address(mov, address);
400
401 arr->last_write = mov;
402
403 /* the array store may only matter to something in an earlier
404 * block (ie. loops), but since arrays are not in SSA, the depth
405 * pass won't know this.. so keep all array stores:
406 */
407 array_insert(block, block->keeps, mov);
408 }
409
410 static inline type_t utype_for_size(unsigned bit_size)
411 {
412 switch (bit_size) {
413 case 32: return TYPE_U32;
414 case 16: return TYPE_U16;
415 case 8: return TYPE_U8;
416 default: unreachable("bad bitsize"); return ~0;
417 }
418 }
419
420 static inline type_t utype_src(nir_src src)
421 { return utype_for_size(nir_src_bit_size(src)); }
422
423 static inline type_t utype_dst(nir_dest dst)
424 { return utype_for_size(nir_dest_bit_size(dst)); }
425
426 /* allocate an n-element value array (to be populated by caller) and
427 * insert it in def_ht
428 */
429 static struct ir3_instruction **
430 get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
431 {
432 struct ir3_instruction **value =
433 ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
434 _mesa_hash_table_insert(ctx->def_ht, dst, value);
435 return value;
436 }
437
438 static struct ir3_instruction **
439 get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
440 {
441 struct ir3_instruction **value;
442
443 if (dst->is_ssa) {
444 value = get_dst_ssa(ctx, &dst->ssa, n);
445 } else {
446 value = ralloc_array(ctx, struct ir3_instruction *, n);
447 }
448
449 /* NOTE: in the non-ssa case, we don't really need to store last_dst,
450 * but this helps us catch cases where a put_dst() call is forgotten
451 */
452 compile_assert(ctx, !ctx->last_dst);
453 ctx->last_dst = value;
454 ctx->last_dst_n = n;
455
456 return value;
457 }
458
459 static struct ir3_instruction * get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align);
460
461 static struct ir3_instruction * const *
462 get_src(struct ir3_context *ctx, nir_src *src)
463 {
464 if (src->is_ssa) {
465 struct hash_entry *entry;
466 entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
467 compile_assert(ctx, entry);
468 return entry->data;
469 } else {
470 nir_register *reg = src->reg.reg;
471 struct ir3_array *arr = get_array(ctx, reg);
472 unsigned num_components = arr->r->num_components;
473 struct ir3_instruction *addr = NULL;
474 struct ir3_instruction **value =
475 ralloc_array(ctx, struct ir3_instruction *, num_components);
476
477 if (src->reg.indirect)
478 addr = get_addr(ctx, get_src(ctx, src->reg.indirect)[0],
479 reg->num_components);
480
481 for (unsigned i = 0; i < num_components; i++) {
482 unsigned n = src->reg.base_offset * reg->num_components + i;
483 compile_assert(ctx, n < arr->length);
484 value[i] = create_array_load(ctx, arr, n, addr);
485 }
486
487 return value;
488 }
489 }
490
491 static void
492 put_dst(struct ir3_context *ctx, nir_dest *dst)
493 {
494 unsigned bit_size = nir_dest_bit_size(*dst);
495
496 if (bit_size < 32) {
497 for (unsigned i = 0; i < ctx->last_dst_n; i++) {
498 struct ir3_instruction *dst = ctx->last_dst[i];
499 dst->regs[0]->flags |= IR3_REG_HALF;
500 if (ctx->last_dst[i]->opc == OPC_META_FO)
501 dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
502 }
503 }
504
505 if (!dst->is_ssa) {
506 nir_register *reg = dst->reg.reg;
507 struct ir3_array *arr = get_array(ctx, reg);
508 unsigned num_components = ctx->last_dst_n;
509 struct ir3_instruction *addr = NULL;
510
511 if (dst->reg.indirect)
512 addr = get_addr(ctx, get_src(ctx, dst->reg.indirect)[0],
513 reg->num_components);
514
515 for (unsigned i = 0; i < num_components; i++) {
516 unsigned n = dst->reg.base_offset * reg->num_components + i;
517 compile_assert(ctx, n < arr->length);
518 if (!ctx->last_dst[i])
519 continue;
520 create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
521 }
522
523 ralloc_free(ctx->last_dst);
524 }
525 ctx->last_dst = NULL;
526 ctx->last_dst_n = 0;
527 }
528
529 static struct ir3_instruction *
530 create_immed_typed(struct ir3_block *block, uint32_t val, type_t type)
531 {
532 struct ir3_instruction *mov;
533 unsigned flags = (type_size(type) < 32) ? IR3_REG_HALF : 0;
534
535 mov = ir3_instr_create(block, OPC_MOV);
536 mov->cat1.src_type = type;
537 mov->cat1.dst_type = type;
538 ir3_reg_create(mov, 0, flags);
539 ir3_reg_create(mov, 0, IR3_REG_IMMED)->uim_val = val;
540
541 return mov;
542 }
543
544 static struct ir3_instruction *
545 create_immed(struct ir3_block *block, uint32_t val)
546 {
547 return create_immed_typed(block, val, TYPE_U32);
548 }
549
550 static struct ir3_instruction *
551 create_addr(struct ir3_block *block, struct ir3_instruction *src, int align)
552 {
553 struct ir3_instruction *instr, *immed;
554
555 /* TODO in at least some cases, the backend could probably be
556 * made clever enough to propagate IR3_REG_HALF..
557 */
558 instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
559 instr->regs[0]->flags |= IR3_REG_HALF;
560
561 switch(align){
562 case 1:
563 /* src *= 1: */
564 break;
565 case 2:
566 /* src *= 2 => src <<= 1: */
567 immed = create_immed(block, 1);
568 immed->regs[0]->flags |= IR3_REG_HALF;
569
570 instr = ir3_SHL_B(block, instr, 0, immed, 0);
571 instr->regs[0]->flags |= IR3_REG_HALF;
572 instr->regs[1]->flags |= IR3_REG_HALF;
573 break;
574 case 3:
575 /* src *= 3: */
576 immed = create_immed(block, 3);
577 immed->regs[0]->flags |= IR3_REG_HALF;
578
579 instr = ir3_MULL_U(block, instr, 0, immed, 0);
580 instr->regs[0]->flags |= IR3_REG_HALF;
581 instr->regs[1]->flags |= IR3_REG_HALF;
582 break;
583 case 4:
584 /* src *= 4 => src <<= 2: */
585 immed = create_immed(block, 2);
586 immed->regs[0]->flags |= IR3_REG_HALF;
587
588 instr = ir3_SHL_B(block, instr, 0, immed, 0);
589 instr->regs[0]->flags |= IR3_REG_HALF;
590 instr->regs[1]->flags |= IR3_REG_HALF;
591 break;
592 default:
593 unreachable("bad align");
594 return NULL;
595 }
596
597 instr = ir3_MOV(block, instr, TYPE_S16);
598 instr->regs[0]->num = regid(REG_A0, 0);
599 instr->regs[0]->flags |= IR3_REG_HALF;
600 instr->regs[1]->flags |= IR3_REG_HALF;
601
602 return instr;
603 }
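/* A rough example of the sequence built above (illustration only, the
 * exact mnemonics come from the disassembler): for an indirect access
 * into a vec4 array (align == 4) the index becomes
 *
 *   cov.u32s16 hr_tmp, src
 *   shl.b      hr_tmp, hr_tmp, 2     ; i.e. src * 4
 *   mova       a0.x, hr_tmp
 *
 * whereas a vec1 array (align == 1) skips the multiply entirely, which
 * is why get_addr() below caches the result per alignment.
 */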
604
605 /* caches addr values to avoid generating multiple cov/shl/mova
606 * sequences for each use of a given NIR-level src as an address
607 */
608 static struct ir3_instruction *
609 get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align)
610 {
611 struct ir3_instruction *addr;
612 unsigned idx = align - 1;
613
614 compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr_ht));
615
616 if (!ctx->addr_ht[idx]) {
617 ctx->addr_ht[idx] = _mesa_hash_table_create(ctx,
618 _mesa_hash_pointer, _mesa_key_pointer_equal);
619 } else {
620 struct hash_entry *entry;
621 entry = _mesa_hash_table_search(ctx->addr_ht[idx], src);
622 if (entry)
623 return entry->data;
624 }
625
626 addr = create_addr(ctx->block, src, align);
627 _mesa_hash_table_insert(ctx->addr_ht[idx], src, addr);
628
629 return addr;
630 }
631
632 static struct ir3_instruction *
633 get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
634 {
635 struct ir3_block *b = ctx->block;
636 struct ir3_instruction *cond;
637
638 /* NOTE: only cmps.*.* can write p0.x: */
639 cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
640 cond->cat2.condition = IR3_COND_NE;
641
642 /* condition always goes in predicate register: */
643 cond->regs[0]->num = regid(REG_P0, 0);
644
645 return cond;
646 }
647
648 static struct ir3_instruction *
649 create_uniform(struct ir3_context *ctx, unsigned n)
650 {
651 struct ir3_instruction *mov;
652
653 mov = ir3_instr_create(ctx->block, OPC_MOV);
654 /* TODO get types right? */
655 mov->cat1.src_type = TYPE_F32;
656 mov->cat1.dst_type = TYPE_F32;
657 ir3_reg_create(mov, 0, 0);
658 ir3_reg_create(mov, n, IR3_REG_CONST);
659
660 return mov;
661 }
662
663 static struct ir3_instruction *
664 create_uniform_indirect(struct ir3_context *ctx, int n,
665 struct ir3_instruction *address)
666 {
667 struct ir3_instruction *mov;
668
669 mov = ir3_instr_create(ctx->block, OPC_MOV);
670 mov->cat1.src_type = TYPE_U32;
671 mov->cat1.dst_type = TYPE_U32;
672 ir3_reg_create(mov, 0, 0);
673 ir3_reg_create(mov, 0, IR3_REG_CONST | IR3_REG_RELATIV)->array.offset = n;
674
675 ir3_instr_set_address(mov, address);
676
677 return mov;
678 }
679
680 static struct ir3_instruction *
681 create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
682 unsigned arrsz)
683 {
684 struct ir3_block *block = ctx->block;
685 struct ir3_instruction *collect;
686
687 if (arrsz == 0)
688 return NULL;
689
690 unsigned flags = arr[0]->regs[0]->flags & IR3_REG_HALF;
691
692 collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
693 ir3_reg_create(collect, 0, flags); /* dst */
694 for (unsigned i = 0; i < arrsz; i++) {
695 struct ir3_instruction *elem = arr[i];
696
697 /* Since arrays are pre-colored in RA, we can't assume that
698 * things will end up in the right place. (Ie. if a collect
699 * joins elements from two different arrays.) So insert an
700 * extra mov.
701 *
702 * We could possibly skip this if all the collected elements
703 * are contiguous elements in a single array.. not sure how
704 * likely that is to happen.
705 *
706 * Fixes a problem with glamor shaders, that in effect do
707 * something like:
708 *
709 * if (foo)
710 * texcoord = ..
711 * else
712 * texcoord = ..
713 * color = texture2D(tex, texcoord);
714 *
715 * In this case, texcoord will end up as nir registers (which
716 * translate to ir3 arrays of length 1), and we can't assume
717 * the two (or more) arrays will get allocated in consecutive
718 * scalar registers.
719 *
720 */
721 if (elem->regs[0]->flags & IR3_REG_ARRAY) {
722 type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
723 elem = ir3_MOV(block, elem, type);
724 }
725
726 compile_assert(ctx, (elem->regs[0]->flags & IR3_REG_HALF) == flags);
727 ir3_reg_create(collect, 0, IR3_REG_SSA | flags)->instr = elem;
728 }
729
730 return collect;
731 }
732
733 static struct ir3_instruction *
734 create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
735 struct ir3_instruction *address, struct ir3_instruction *collect)
736 {
737 struct ir3_block *block = ctx->block;
738 struct ir3_instruction *mov;
739 struct ir3_register *src;
740
741 mov = ir3_instr_create(block, OPC_MOV);
742 mov->cat1.src_type = TYPE_U32;
743 mov->cat1.dst_type = TYPE_U32;
744 ir3_reg_create(mov, 0, 0);
745 src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV);
746 src->instr = collect;
747 src->size = arrsz;
748 src->array.offset = n;
749
750 ir3_instr_set_address(mov, address);
751
752 return mov;
753 }
754
755 static struct ir3_instruction *
756 create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask)
757 {
758 struct ir3_instruction *in;
759
760 in = ir3_instr_create(ctx->in_block, OPC_META_INPUT);
761 in->inout.block = ctx->in_block;
762 ir3_reg_create(in, n, 0);
763
764 in->regs[0]->wrmask = compmask;
765
766 return in;
767 }
768
769 static struct ir3_instruction *
770 create_input(struct ir3_context *ctx, unsigned n)
771 {
772 return create_input_compmask(ctx, n, 0x1);
773 }
774
775 static struct ir3_instruction *
776 create_frag_input(struct ir3_context *ctx, bool use_ldlv)
777 {
778 struct ir3_block *block = ctx->block;
779 struct ir3_instruction *instr;
780 /* actual inloc is assigned and fixed up later: */
781 struct ir3_instruction *inloc = create_immed(block, 0);
782
783 if (use_ldlv) {
784 instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
785 instr->cat6.type = TYPE_U32;
786 instr->cat6.iim_val = 1;
787 } else {
788 instr = ir3_BARY_F(block, inloc, 0, ctx->frag_vcoord, 0);
789 instr->regs[2]->wrmask = 0x3;
790 }
791
792 return instr;
793 }
794
795 static struct ir3_instruction *
796 create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
797 {
798 /* first four vec4 sysval's reserved for UBOs: */
799 /* NOTE: dp is in scalar, but there can be >4 dp components: */
800 unsigned n = ctx->so->constbase.driver_param;
801 unsigned r = regid(n + dp / 4, dp % 4);
802 return create_uniform(ctx, r);
803 }
804
805 /* helper for instructions that produce multiple consecutive scalar
806 * outputs which need to have a split/fanout meta instruction inserted
807 */
808 static void
809 split_dest(struct ir3_block *block, struct ir3_instruction **dst,
810 struct ir3_instruction *src, unsigned base, unsigned n)
811 {
812 struct ir3_instruction *prev = NULL;
813
814 if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
815 dst[0] = src;
816 return;
817 }
818
819 for (int i = 0, j = 0; i < n; i++) {
820 struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
821 ir3_reg_create(split, 0, IR3_REG_SSA);
822 ir3_reg_create(split, 0, IR3_REG_SSA)->instr = src;
823 split->fo.off = i + base;
824
825 if (prev) {
826 split->cp.left = prev;
827 split->cp.left_cnt++;
828 prev->cp.right = split;
829 prev->cp.right_cnt++;
830 }
831 prev = split;
832
833 if (src->regs[0]->wrmask & (1 << (i + base)))
834 dst[j++] = split;
835 }
836 }
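/* For instance (purely illustrative): a sam instruction writing a vec4
 * (wrmask 0xf) passed here with base=0 and n=4 gets four OPC_META_FO
 * "fanout" instructions, one per component, so later passes can treat
 * each scalar result as its own SSA value; a single-component src with
 * wrmask 0x1 is returned as-is by the early-out above.
 */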
837
838 /*
839 * Adreno uses uint rather than having a dedicated bool type,
840 * which (potentially) requires some conversion, in particular
841 * when using the output of a bool instr as an int input, or
842 * vice versa.
843 *
844 *        | Adreno | NIR |
845 * -------+--------+-----+
846 *   true |    1   |  ~0 |
847 *  false |    0   |   0 |
848 *
849 * To convert from an adreno bool (uint) to nir, use:
850 *
851 *   absneg.s dst, (neg)src
852 *
853 * To convert back in the other direction:
854 *
855 *   absneg.s dst, (abs)src
856 *
857 * The CP step can clean up the absneg.s's that cancel each other
858 * out, and with a slight bit of extra cleverness (to recognize
859 * the instructions which produce either a 0 or 1) can eliminate
860 * the absneg.s's completely when an instruction that wants
861 * 0/1 consumes the result. For example, when a nir 'bcsel'
862 * consumes the result of 'feq'. So we should be able to get by
863 * without a boolean resolve step, and without incurring any
864 * extra penalty in instruction count.
865 */
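/* Worked example of the table above: NIR true is ~0, i.e. -1 as a signed
 * int, so (abs)~0 = 1 produces the native encoding, and (neg)1 = -1 = ~0
 * recovers the NIR encoding; 0 maps to 0 in both directions, so the two
 * absneg.s forms below are exact inverses of each other.
 */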
866
867 /* NIR bool -> native (adreno): */
868 static struct ir3_instruction *
869 ir3_b2n(struct ir3_block *block, struct ir3_instruction *instr)
870 {
871 return ir3_ABSNEG_S(block, instr, IR3_REG_SABS);
872 }
873
874 /* native (adreno) -> NIR bool: */
875 static struct ir3_instruction *
876 ir3_n2b(struct ir3_block *block, struct ir3_instruction *instr)
877 {
878 return ir3_ABSNEG_S(block, instr, IR3_REG_SNEG);
879 }
880
881 /*
882 * alu/sfu instructions:
883 */
884
885 static struct ir3_instruction *
886 create_cov(struct ir3_context *ctx, struct ir3_instruction *src,
887 unsigned src_bitsize, nir_op op)
888 {
889 type_t src_type, dst_type;
890
891 switch (op) {
892 case nir_op_f2f32:
893 case nir_op_f2f16_rtne:
894 case nir_op_f2f16_rtz:
895 case nir_op_f2f16:
896 case nir_op_f2i32:
897 case nir_op_f2i16:
898 case nir_op_f2i8:
899 case nir_op_f2u32:
900 case nir_op_f2u16:
901 case nir_op_f2u8:
902 switch (src_bitsize) {
903 case 32:
904 src_type = TYPE_F32;
905 break;
906 case 16:
907 src_type = TYPE_F16;
908 break;
909 default:
910 compile_error(ctx, "invalid src bit size: %u", src_bitsize);
911 }
912 break;
913
914 case nir_op_i2f32:
915 case nir_op_i2f16:
916 case nir_op_i2i32:
917 case nir_op_i2i16:
918 case nir_op_i2i8:
919 switch (src_bitsize) {
920 case 32:
921 src_type = TYPE_S32;
922 break;
923 case 16:
924 src_type = TYPE_S16;
925 break;
926 case 8:
927 src_type = TYPE_S8;
928 break;
929 default:
930 compile_error(ctx, "invalid src bit size: %u", src_bitsize);
931 }
932 break;
933
934 case nir_op_u2f32:
935 case nir_op_u2f16:
936 case nir_op_u2u32:
937 case nir_op_u2u16:
938 case nir_op_u2u8:
939 switch (src_bitsize) {
940 case 32:
941 src_type = TYPE_U32;
942 break;
943 case 16:
944 src_type = TYPE_U16;
945 break;
946 case 8:
947 src_type = TYPE_U8;
948 break;
949 default:
950 compile_error(ctx, "invalid src bit size: %u", src_bitsize);
951 }
952 break;
953
954 default:
955 compile_error(ctx, "invalid conversion op: %u", op);
956 }
957
958 switch (op) {
959 case nir_op_f2f32:
960 case nir_op_i2f32:
961 case nir_op_u2f32:
962 dst_type = TYPE_F32;
963 break;
964
965 case nir_op_f2f16_rtne:
966 case nir_op_f2f16_rtz:
967 case nir_op_f2f16:
968 /* TODO how to handle rounding mode? */
969 case nir_op_i2f16:
970 case nir_op_u2f16:
971 dst_type = TYPE_F16;
972 break;
973
974 case nir_op_f2i32:
975 case nir_op_i2i32:
976 dst_type = TYPE_S32;
977 break;
978
979 case nir_op_f2i16:
980 case nir_op_i2i16:
981 dst_type = TYPE_S16;
982 break;
983
984 case nir_op_f2i8:
985 case nir_op_i2i8:
986 dst_type = TYPE_S8;
987 break;
988
989 case nir_op_f2u32:
990 case nir_op_u2u32:
991 dst_type = TYPE_U32;
992 break;
993
994 case nir_op_f2u16:
995 case nir_op_u2u16:
996 dst_type = TYPE_U16;
997 break;
998
999 case nir_op_f2u8:
1000 case nir_op_u2u8:
1001 dst_type = TYPE_U8;
1002 break;
1003
1004 default:
1005 compile_error(ctx, "invalid conversion op: %u", op);
1006 }
1007
1008 return ir3_COV(ctx->block, src, src_type, dst_type);
1009 }
1010
1011 static void
1012 emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
1013 {
1014 const nir_op_info *info = &nir_op_infos[alu->op];
1015 struct ir3_instruction **dst, *src[info->num_inputs];
1016 unsigned bs[info->num_inputs]; /* bit size */
1017 struct ir3_block *b = ctx->block;
1018 unsigned dst_sz, wrmask;
1019
1020 if (alu->dest.dest.is_ssa) {
1021 dst_sz = alu->dest.dest.ssa.num_components;
1022 wrmask = (1 << dst_sz) - 1;
1023 } else {
1024 dst_sz = alu->dest.dest.reg.reg->num_components;
1025 wrmask = alu->dest.write_mask;
1026 }
1027
1028 dst = get_dst(ctx, &alu->dest.dest, dst_sz);
1029
1030 /* Vectors are special in that they have non-scalarized writemasks,
1031 * and just take the first swizzle channel for each argument in
1032 * order into each writemask channel.
1033 */
1034 if ((alu->op == nir_op_vec2) ||
1035 (alu->op == nir_op_vec3) ||
1036 (alu->op == nir_op_vec4)) {
1037
1038 for (int i = 0; i < info->num_inputs; i++) {
1039 nir_alu_src *asrc = &alu->src[i];
1040
1041 compile_assert(ctx, !asrc->abs);
1042 compile_assert(ctx, !asrc->negate);
1043
1044 src[i] = get_src(ctx, &asrc->src)[asrc->swizzle[0]];
1045 if (!src[i])
1046 src[i] = create_immed(ctx->block, 0);
1047 dst[i] = ir3_MOV(b, src[i], TYPE_U32);
1048 }
1049
1050 put_dst(ctx, &alu->dest.dest);
1051 return;
1052 }
1053
1054 /* We also get mov's with more than one component, so handle
1055 * those specially:
1056 */
1057 if ((alu->op == nir_op_imov) || (alu->op == nir_op_fmov)) {
1058 type_t type = (alu->op == nir_op_imov) ? TYPE_U32 : TYPE_F32;
1059 nir_alu_src *asrc = &alu->src[0];
1060 struct ir3_instruction *const *src0 = get_src(ctx, &asrc->src);
1061
1062 for (unsigned i = 0; i < dst_sz; i++) {
1063 if (wrmask & (1 << i)) {
1064 dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], type);
1065 } else {
1066 dst[i] = NULL;
1067 }
1068 }
1069
1070 put_dst(ctx, &alu->dest.dest);
1071 return;
1072 }
1073
1074 /* General case: We can just grab the one used channel per src. */
1075 for (int i = 0; i < info->num_inputs; i++) {
1076 unsigned chan = ffs(alu->dest.write_mask) - 1;
1077 nir_alu_src *asrc = &alu->src[i];
1078
1079 compile_assert(ctx, !asrc->abs);
1080 compile_assert(ctx, !asrc->negate);
1081
1082 src[i] = get_src(ctx, &asrc->src)[asrc->swizzle[chan]];
1083 bs[i] = nir_src_bit_size(asrc->src);
1084
1085 compile_assert(ctx, src[i]);
1086 }
1087
1088 switch (alu->op) {
1089 case nir_op_f2f32:
1090 case nir_op_f2f16_rtne:
1091 case nir_op_f2f16_rtz:
1092 case nir_op_f2f16:
1093 case nir_op_f2i32:
1094 case nir_op_f2i16:
1095 case nir_op_f2i8:
1096 case nir_op_f2u32:
1097 case nir_op_f2u16:
1098 case nir_op_f2u8:
1099 case nir_op_i2f32:
1100 case nir_op_i2f16:
1101 case nir_op_i2i32:
1102 case nir_op_i2i16:
1103 case nir_op_i2i8:
1104 case nir_op_u2f32:
1105 case nir_op_u2f16:
1106 case nir_op_u2u32:
1107 case nir_op_u2u16:
1108 case nir_op_u2u8:
1109 dst[0] = create_cov(ctx, src[0], bs[0], alu->op);
1110 break;
1111 case nir_op_f2b:
1112 dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
1113 dst[0]->cat2.condition = IR3_COND_NE;
1114 dst[0] = ir3_n2b(b, dst[0]);
1115 break;
1116 case nir_op_b2f:
1117 dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
1118 break;
1119 case nir_op_b2i:
1120 dst[0] = ir3_b2n(b, src[0]);
1121 break;
1122 case nir_op_i2b:
1123 dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
1124 dst[0]->cat2.condition = IR3_COND_NE;
1125 dst[0] = ir3_n2b(b, dst[0]);
1126 break;
1127
1128 case nir_op_fneg:
1129 dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FNEG);
1130 break;
1131 case nir_op_fabs:
1132 dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FABS);
1133 break;
1134 case nir_op_fmax:
1135 dst[0] = ir3_MAX_F(b, src[0], 0, src[1], 0);
1136 break;
1137 case nir_op_fmin:
1138 dst[0] = ir3_MIN_F(b, src[0], 0, src[1], 0);
1139 break;
1140 case nir_op_fsat:
1141 /* if there is just a single use of the src, and it supports the
1142 * (sat) bit, we can just fold the (sat) flag back to the
1143 * src instruction and create a mov. This is easier for cp
1144 * to eliminate.
1145 *
1146 * TODO probably opc_cat==4 is ok too
1147 */
1148 if (alu->src[0].src.is_ssa &&
1149 (list_length(&alu->src[0].src.ssa->uses) == 1) &&
1150 ((opc_cat(src[0]->opc) == 2) || (opc_cat(src[0]->opc) == 3))) {
1151 src[0]->flags |= IR3_INSTR_SAT;
1152 dst[0] = ir3_MOV(b, src[0], TYPE_U32);
1153 } else {
1154 /* otherwise generate a max.f that saturates.. blob does
1155 * similar (generating a cat2 mov using max.f)
1156 */
1157 dst[0] = ir3_MAX_F(b, src[0], 0, src[0], 0);
1158 dst[0]->flags |= IR3_INSTR_SAT;
1159 }
1160 break;
1161 case nir_op_fmul:
1162 dst[0] = ir3_MUL_F(b, src[0], 0, src[1], 0);
1163 break;
1164 case nir_op_fadd:
1165 dst[0] = ir3_ADD_F(b, src[0], 0, src[1], 0);
1166 break;
1167 case nir_op_fsub:
1168 dst[0] = ir3_ADD_F(b, src[0], 0, src[1], IR3_REG_FNEG);
1169 break;
1170 case nir_op_ffma:
1171 dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0);
1172 break;
1173 case nir_op_fddx:
1174 dst[0] = ir3_DSX(b, src[0], 0);
1175 dst[0]->cat5.type = TYPE_F32;
1176 break;
1177 case nir_op_fddy:
1178 dst[0] = ir3_DSY(b, src[0], 0);
1179 dst[0]->cat5.type = TYPE_F32;
1180 break;
1182 case nir_op_flt:
1183 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
1184 dst[0]->cat2.condition = IR3_COND_LT;
1185 dst[0] = ir3_n2b(b, dst[0]);
1186 break;
1187 case nir_op_fge:
1188 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
1189 dst[0]->cat2.condition = IR3_COND_GE;
1190 dst[0] = ir3_n2b(b, dst[0]);
1191 break;
1192 case nir_op_feq:
1193 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
1194 dst[0]->cat2.condition = IR3_COND_EQ;
1195 dst[0] = ir3_n2b(b, dst[0]);
1196 break;
1197 case nir_op_fne:
1198 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
1199 dst[0]->cat2.condition = IR3_COND_NE;
1200 dst[0] = ir3_n2b(b, dst[0]);
1201 break;
1202 case nir_op_fceil:
1203 dst[0] = ir3_CEIL_F(b, src[0], 0);
1204 break;
1205 case nir_op_ffloor:
1206 dst[0] = ir3_FLOOR_F(b, src[0], 0);
1207 break;
1208 case nir_op_ftrunc:
1209 dst[0] = ir3_TRUNC_F(b, src[0], 0);
1210 break;
1211 case nir_op_fround_even:
1212 dst[0] = ir3_RNDNE_F(b, src[0], 0);
1213 break;
1214 case nir_op_fsign:
1215 dst[0] = ir3_SIGN_F(b, src[0], 0);
1216 break;
1217
1218 case nir_op_fsin:
1219 dst[0] = ir3_SIN(b, src[0], 0);
1220 break;
1221 case nir_op_fcos:
1222 dst[0] = ir3_COS(b, src[0], 0);
1223 break;
1224 case nir_op_frsq:
1225 dst[0] = ir3_RSQ(b, src[0], 0);
1226 break;
1227 case nir_op_frcp:
1228 dst[0] = ir3_RCP(b, src[0], 0);
1229 break;
1230 case nir_op_flog2:
1231 dst[0] = ir3_LOG2(b, src[0], 0);
1232 break;
1233 case nir_op_fexp2:
1234 dst[0] = ir3_EXP2(b, src[0], 0);
1235 break;
1236 case nir_op_fsqrt:
1237 dst[0] = ir3_SQRT(b, src[0], 0);
1238 break;
1239
1240 case nir_op_iabs:
1241 dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SABS);
1242 break;
1243 case nir_op_iadd:
1244 dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
1245 break;
1246 case nir_op_iand:
1247 dst[0] = ir3_AND_B(b, src[0], 0, src[1], 0);
1248 break;
1249 case nir_op_imax:
1250 dst[0] = ir3_MAX_S(b, src[0], 0, src[1], 0);
1251 break;
1252 case nir_op_umax:
1253 dst[0] = ir3_MAX_U(b, src[0], 0, src[1], 0);
1254 break;
1255 case nir_op_imin:
1256 dst[0] = ir3_MIN_S(b, src[0], 0, src[1], 0);
1257 break;
1258 case nir_op_umin:
1259 dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
1260 break;
1261 case nir_op_imul:
1262 /*
1263 * dst = (al * bl) + (ah * bl << 16) + (al * bh << 16)
1264 * mull.u tmp0, a, b ; mul low, i.e. al * bl
1265 * madsh.m16 tmp1, a, b, tmp0 ; mul-add shift high mix, i.e. ah * bl << 16
1266 * madsh.m16 dst, b, a, tmp1 ; i.e. al * bh << 16
1267 */
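/* (Why three instructions are enough: with a = (ah << 16) + al and
 * b = (bh << 16) + bl, the full product is
 * al*bl + ((ah*bl + al*bh) << 16) + (ah*bh << 32), and the last term
 * lies entirely above bit 31, so it can be dropped for a 32b result.)
 */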
1268 dst[0] = ir3_MADSH_M16(b, src[1], 0, src[0], 0,
1269 ir3_MADSH_M16(b, src[0], 0, src[1], 0,
1270 ir3_MULL_U(b, src[0], 0, src[1], 0), 0), 0);
1271 break;
1272 case nir_op_ineg:
1273 dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
1274 break;
1275 case nir_op_inot:
1276 dst[0] = ir3_NOT_B(b, src[0], 0);
1277 break;
1278 case nir_op_ior:
1279 dst[0] = ir3_OR_B(b, src[0], 0, src[1], 0);
1280 break;
1281 case nir_op_ishl:
1282 dst[0] = ir3_SHL_B(b, src[0], 0, src[1], 0);
1283 break;
1284 case nir_op_ishr:
1285 dst[0] = ir3_ASHR_B(b, src[0], 0, src[1], 0);
1286 break;
1287 case nir_op_isign: {
1288 /* maybe this would be sane to lower in nir.. */
1289 struct ir3_instruction *neg, *pos;
1290
1291 neg = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
1292 neg->cat2.condition = IR3_COND_LT;
1293
1294 pos = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
1295 pos->cat2.condition = IR3_COND_GT;
1296
1297 dst[0] = ir3_SUB_U(b, pos, 0, neg, 0);
1298
1299 break;
1300 }
1301 case nir_op_isub:
1302 dst[0] = ir3_SUB_U(b, src[0], 0, src[1], 0);
1303 break;
1304 case nir_op_ixor:
1305 dst[0] = ir3_XOR_B(b, src[0], 0, src[1], 0);
1306 break;
1307 case nir_op_ushr:
1308 dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0);
1309 break;
1310 case nir_op_ilt:
1311 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
1312 dst[0]->cat2.condition = IR3_COND_LT;
1313 dst[0] = ir3_n2b(b, dst[0]);
1314 break;
1315 case nir_op_ige:
1316 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
1317 dst[0]->cat2.condition = IR3_COND_GE;
1318 dst[0] = ir3_n2b(b, dst[0]);
1319 break;
1320 case nir_op_ieq:
1321 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
1322 dst[0]->cat2.condition = IR3_COND_EQ;
1323 dst[0] = ir3_n2b(b, dst[0]);
1324 break;
1325 case nir_op_ine:
1326 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
1327 dst[0]->cat2.condition = IR3_COND_NE;
1328 dst[0] = ir3_n2b(b, dst[0]);
1329 break;
1330 case nir_op_ult:
1331 dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
1332 dst[0]->cat2.condition = IR3_COND_LT;
1333 dst[0] = ir3_n2b(b, dst[0]);
1334 break;
1335 case nir_op_uge:
1336 dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
1337 dst[0]->cat2.condition = IR3_COND_GE;
1338 dst[0] = ir3_n2b(b, dst[0]);
1339 break;
1340
1341 case nir_op_bcsel: {
1342 struct ir3_instruction *cond = ir3_b2n(b, src[0]);
1343 compile_assert(ctx, bs[1] == bs[2]);
1344 /* the boolean condition is 32b even if src[1] and src[2] are
1345 * half-precision, but sel.b16 wants all three src's to be the
1346 * same type.
1347 */
1348 if (bs[1] < 32)
1349 cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16);
1350 dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
1351 break;
1352 }
1353 case nir_op_bit_count:
1354 dst[0] = ir3_CBITS_B(b, src[0], 0);
1355 break;
1356 case nir_op_ifind_msb: {
1357 struct ir3_instruction *cmp;
1358 dst[0] = ir3_CLZ_S(b, src[0], 0);
1359 cmp = ir3_CMPS_S(b, dst[0], 0, create_immed(b, 0), 0);
1360 cmp->cat2.condition = IR3_COND_GE;
1361 dst[0] = ir3_SEL_B32(b,
1362 ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
1363 cmp, 0, dst[0], 0);
1364 break;
1365 }
1366 case nir_op_ufind_msb:
1367 dst[0] = ir3_CLZ_B(b, src[0], 0);
1368 dst[0] = ir3_SEL_B32(b,
1369 ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
1370 src[0], 0, dst[0], 0);
1371 break;
1372 case nir_op_find_lsb:
1373 dst[0] = ir3_BFREV_B(b, src[0], 0);
1374 dst[0] = ir3_CLZ_B(b, dst[0], 0);
1375 break;
1376 case nir_op_bitfield_reverse:
1377 dst[0] = ir3_BFREV_B(b, src[0], 0);
1378 break;
1379
1380 default:
1381 compile_error(ctx, "Unhandled ALU op: %s\n",
1382 nir_op_infos[alu->op].name);
1383 break;
1384 }
1385
1386 put_dst(ctx, &alu->dest.dest);
1387 }
1388
1389 /* handles direct/indirect UBO reads: */
1390 static void
1391 emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1392 struct ir3_instruction **dst)
1393 {
1394 struct ir3_block *b = ctx->block;
1395 struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
1396 nir_const_value *const_offset;
1397 /* UBO addresses are the first driver params: */
1398 unsigned ubo = regid(ctx->so->constbase.ubo, 0);
1399 const unsigned ptrsz = pointer_size(ctx);
1400
1401 int off = 0;
1402
1403 /* First src is ubo index, which could either be an immed or not: */
1404 src0 = get_src(ctx, &intr->src[0])[0];
1405 if (is_same_type_mov(src0) &&
1406 (src0->regs[1]->flags & IR3_REG_IMMED)) {
1407 base_lo = create_uniform(ctx, ubo + (src0->regs[1]->iim_val * ptrsz));
1408 base_hi = create_uniform(ctx, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
1409 } else {
1410 base_lo = create_uniform_indirect(ctx, ubo, get_addr(ctx, src0, 4));
1411 base_hi = create_uniform_indirect(ctx, ubo + 1, get_addr(ctx, src0, 4));
1412 }
1413
1414 /* note: on 32bit GPUs base_hi is ignored and DCE'd */
1415 addr = base_lo;
1416
1417 const_offset = nir_src_as_const_value(intr->src[1]);
1418 if (const_offset) {
1419 off += const_offset->u32[0];
1420 } else {
1421 /* For load_ubo_indirect, second src is indirect offset: */
1422 src1 = get_src(ctx, &intr->src[1])[0];
1423
1424 /* and add offset to addr: */
1425 addr = ir3_ADD_S(b, addr, 0, src1, 0);
1426 }
1427
1428 /* if the offset is too large to encode in the ldg, split it out: */
1429 if ((off + (intr->num_components * 4)) > 1024) {
1430 /* split out the minimal amount to improve the odds that
1431 * cp can fit the immediate in the add.s instruction:
1432 */
1433 unsigned off2 = off + (intr->num_components * 4) - 1024;
1434 addr = ir3_ADD_S(b, addr, 0, create_immed(b, off2), 0);
1435 off -= off2;
1436 }
1437
1438 if (ptrsz == 2) {
1439 struct ir3_instruction *carry;
1440
1441 /* handle 32b rollover, ie:
1442 * if (addr < base_lo)
1443 * base_hi++
1444 */
1445 carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
1446 carry->cat2.condition = IR3_COND_LT;
1447 base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);
1448
1449 addr = create_collect(ctx, (struct ir3_instruction*[]){ addr, base_hi }, 2);
1450 }
1451
1452 for (int i = 0; i < intr->num_components; i++) {
1453 struct ir3_instruction *load =
1454 ir3_LDG(b, addr, 0, create_immed(b, 1), 0);
1455 load->cat6.type = TYPE_U32;
1456 load->cat6.src_offset = off + i * 4; /* byte offset */
1457 dst[i] = load;
1458 }
1459 }
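/* Sketch of the uniform indexing above (slot numbers hypothetical): if
 * constbase.ubo were c4.x on a ptrsz == 2 gen, UBO 1's address would sit
 * in c4.z (lo) / c4.w (hi); for an immediate block index the two
 * create_uniform() calls read exactly those slots, otherwise the index
 * is turned into an a0.x-relative load of the same table.
 */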
1460
1461 /* src[] = { buffer_index, offset }. No const_index */
1462 static void
1463 emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1464 struct ir3_instruction **dst)
1465 {
1466 struct ir3_block *b = ctx->block;
1467 struct ir3_instruction *ldgb, *src0, *src1, *offset;
1468 nir_const_value *const_offset;
1469
1470 /* can this be non-const buffer_index? how do we handle that? */
1471 const_offset = nir_src_as_const_value(intr->src[0]);
1472 compile_assert(ctx, const_offset);
1473
1474 offset = get_src(ctx, &intr->src[1])[0];
1475
1476 /* src0 is uvec2(offset*4, 0), src1 is offset.. nir already *= 4: */
1477 src0 = create_collect(ctx, (struct ir3_instruction*[]){
1478 offset,
1479 create_immed(b, 0),
1480 }, 2);
1481 src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
1482
1483 ldgb = ir3_LDGB(b, create_immed(b, const_offset->u32[0]), 0,
1484 src0, 0, src1, 0);
1485 ldgb->regs[0]->wrmask = MASK(intr->num_components);
1486 ldgb->cat6.iim_val = intr->num_components;
1487 ldgb->cat6.d = 4;
1488 ldgb->cat6.type = TYPE_U32;
1489 ldgb->barrier_class = IR3_BARRIER_BUFFER_R;
1490 ldgb->barrier_conflict = IR3_BARRIER_BUFFER_W;
1491
1492 split_dest(b, dst, ldgb, 0, intr->num_components);
1493 }
1494
1495 /* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
1496 static void
1497 emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1498 {
1499 struct ir3_block *b = ctx->block;
1500 struct ir3_instruction *stgb, *src0, *src1, *src2, *offset;
1501 nir_const_value *const_offset;
1502 /* TODO handle wrmask properly, see _store_shared().. but I think
1503 * it is more of a PITA than that, since the blob ends up loading the
1504 * masked components and writing them back out.
1505 */
1506 unsigned wrmask = intr->const_index[0];
1507 unsigned ncomp = ffs(~wrmask) - 1;
1508
1509 /* can this be non-const buffer_index? how do we handle that? */
1510 const_offset = nir_src_as_const_value(intr->src[1]);
1511 compile_assert(ctx, const_offset);
1512
1513 offset = get_src(ctx, &intr->src[2])[0];
1514
1515 /* src0 is value, src1 is offset, src2 is uvec2(offset*4, 0)..
1516 * nir already *= 4:
1517 */
1518 src0 = create_collect(ctx, get_src(ctx, &intr->src[0]), ncomp);
1519 src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
1520 src2 = create_collect(ctx, (struct ir3_instruction*[]){
1521 offset,
1522 create_immed(b, 0),
1523 }, 2);
1524
1525 stgb = ir3_STGB(b, create_immed(b, const_offset->u32[0]), 0,
1526 src0, 0, src1, 0, src2, 0);
1527 stgb->cat6.iim_val = ncomp;
1528 stgb->cat6.d = 4;
1529 stgb->cat6.type = TYPE_U32;
1530 stgb->barrier_class = IR3_BARRIER_BUFFER_W;
1531 stgb->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
1532
1533 array_insert(b, b->keeps, stgb);
1534 }
1535
1536 /* src[] = { block_index } */
1537 static void
1538 emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1539 struct ir3_instruction **dst)
1540 {
1541 /* SSBO size stored as a const starting at ssbo_sizes: */
1542 unsigned blk_idx = nir_src_as_const_value(intr->src[0])->u32[0];
1543 unsigned idx = regid(ctx->so->constbase.ssbo_sizes, 0) +
1544 ctx->so->const_layout.ssbo_size.off[blk_idx];
1545
1546 debug_assert(ctx->so->const_layout.ssbo_size.mask & (1 << blk_idx));
1547
1548 dst[0] = create_uniform(ctx, idx);
1549 }
1550
1551 /*
1552 * SSBO atomic intrinsics
1553 *
1554 * All of the SSBO atomic memory operations read a value from memory,
1555 * compute a new value using one of the operations below, write the new
1556 * value to memory, and return the original value read.
1557 *
1558 * All operations take 3 sources except CompSwap that takes 4. These
1559 * sources represent:
1560 *
1561 * 0: The SSBO buffer index.
1562 * 1: The offset into the SSBO buffer of the variable that the atomic
1563 * operation will operate on.
1564 * 2: The data parameter to the atomic function (i.e. the value to add
1565 * in ssbo_atomic_add, etc).
1566 * 3: For CompSwap only: the second data parameter.
1567 */
1568 static struct ir3_instruction *
1569 emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1570 {
1571 struct ir3_block *b = ctx->block;
1572 struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *offset;
1573 nir_const_value *const_offset;
1574 type_t type = TYPE_U32;
1575
1576 /* can this be non-const buffer_index? how do we handle that? */
1577 const_offset = nir_src_as_const_value(intr->src[0]);
1578 compile_assert(ctx, const_offset);
1579 ssbo = create_immed(b, const_offset->u32[0]);
1580
1581 offset = get_src(ctx, &intr->src[1])[0];
1582
1583 /* src0 is data (or uvec2(data, compare))
1584 * src1 is offset
1585 * src2 is uvec2(offset*4, 0) (appears to be 64b byte offset)
1586 *
1587 * Note that nir already multiplies the offset by four
1588 */
1589 src0 = get_src(ctx, &intr->src[2])[0];
1590 src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
1591 src2 = create_collect(ctx, (struct ir3_instruction*[]){
1592 offset,
1593 create_immed(b, 0),
1594 }, 2);
1595
1596 switch (intr->intrinsic) {
1597 case nir_intrinsic_ssbo_atomic_add:
1598 atomic = ir3_ATOMIC_ADD_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1599 break;
1600 case nir_intrinsic_ssbo_atomic_imin:
1601 atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1602 type = TYPE_S32;
1603 break;
1604 case nir_intrinsic_ssbo_atomic_umin:
1605 atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1606 break;
1607 case nir_intrinsic_ssbo_atomic_imax:
1608 atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1609 type = TYPE_S32;
1610 break;
1611 case nir_intrinsic_ssbo_atomic_umax:
1612 atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1613 break;
1614 case nir_intrinsic_ssbo_atomic_and:
1615 atomic = ir3_ATOMIC_AND_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1616 break;
1617 case nir_intrinsic_ssbo_atomic_or:
1618 atomic = ir3_ATOMIC_OR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1619 break;
1620 case nir_intrinsic_ssbo_atomic_xor:
1621 atomic = ir3_ATOMIC_XOR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1622 break;
1623 case nir_intrinsic_ssbo_atomic_exchange:
1624 atomic = ir3_ATOMIC_XCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1625 break;
1626 case nir_intrinsic_ssbo_atomic_comp_swap:
1627 /* for cmpxchg, src0 is [ui]vec2(data, compare): */
1628 src0 = create_collect(ctx, (struct ir3_instruction*[]){
1629 get_src(ctx, &intr->src[3])[0],
1630 src0,
1631 }, 2);
1632 atomic = ir3_ATOMIC_CMPXCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1633 break;
1634 default:
1635 unreachable("boo");
1636 }
1637
1638 atomic->cat6.iim_val = 1;
1639 atomic->cat6.d = 4;
1640 atomic->cat6.type = type;
1641 atomic->barrier_class = IR3_BARRIER_BUFFER_W;
1642 atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
1643
1644 /* even if nothing consumes the result, we can't DCE the instruction: */
1645 array_insert(b, b->keeps, atomic);
1646
1647 return atomic;
1648 }
1649
1650 /* src[] = { offset }. const_index[] = { base } */
1651 static void
1652 emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1653 struct ir3_instruction **dst)
1654 {
1655 struct ir3_block *b = ctx->block;
1656 struct ir3_instruction *ldl, *offset;
1657 unsigned base;
1658
1659 offset = get_src(ctx, &intr->src[0])[0];
1660 base = nir_intrinsic_base(intr);
1661
1662 ldl = ir3_LDL(b, offset, 0, create_immed(b, intr->num_components), 0);
1663 ldl->cat6.src_offset = base;
1664 ldl->cat6.type = utype_dst(intr->dest);
1665 ldl->regs[0]->wrmask = MASK(intr->num_components);
1666
1667 ldl->barrier_class = IR3_BARRIER_SHARED_R;
1668 ldl->barrier_conflict = IR3_BARRIER_SHARED_W;
1669
1670 split_dest(b, dst, ldl, 0, intr->num_components);
1671 }
1672
1673 /* src[] = { value, offset }. const_index[] = { base, write_mask } */
1674 static void
1675 emit_intrinsic_store_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1676 {
1677 struct ir3_block *b = ctx->block;
1678 struct ir3_instruction *stl, *offset;
1679 struct ir3_instruction * const *value;
1680 unsigned base, wrmask;
1681
1682 value = get_src(ctx, &intr->src[0]);
1683 offset = get_src(ctx, &intr->src[1])[0];
1684
1685 base = nir_intrinsic_base(intr);
1686 wrmask = nir_intrinsic_write_mask(intr);
1687
1688 /* Combine groups of consecutive enabled channels into one write
1689 * message. We use ffs to find the first enabled channel and then ffs on
1690 * the bit-inverse, down-shifted writemask to determine the length of
1691 * the block of enabled bits.
1692 *
1693 * (trick stolen from i965's fs_visitor::nir_emit_cs_intrinsic())
1694 */
1695 while (wrmask) {
1696 unsigned first_component = ffs(wrmask) - 1;
1697 unsigned length = ffs(~(wrmask >> first_component)) - 1;
1698
1699 stl = ir3_STL(b, offset, 0,
1700 create_collect(ctx, &value[first_component], length), 0,
1701 create_immed(b, length), 0);
1702 stl->cat6.dst_offset = first_component + base;
1703 stl->cat6.type = utype_src(intr->src[0]);
1704 stl->barrier_class = IR3_BARRIER_SHARED_W;
1705 stl->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1706
1707 array_insert(b, b->keeps, stl);
1708
1709 /* Clear the bits in the writemask that we just wrote, then try
1710 * again to see if more channels are left.
1711 */
1712 wrmask &= (15 << (first_component + length));
1713 }
1714 }
1715
1716 /*
1717 * CS shared variable atomic intrinsics
1718 *
1719 * All of the shared variable atomic memory operations read a value from
1720 * memory, compute a new value using one of the operations below, write the
1721 * new value to memory, and return the original value read.
1722 *
1723 * All operations take 2 sources except CompSwap that takes 3. These
1724 * sources represent:
1725 *
1726 * 0: The offset into the shared variable storage region that the atomic
1727 * operation will operate on.
1728 * 1: The data parameter to the atomic function (i.e. the value to add
1729 * in shared_atomic_add, etc).
1730 * 2: For CompSwap only: the second data parameter.
1731 */
1732 static struct ir3_instruction *
1733 emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1734 {
1735 struct ir3_block *b = ctx->block;
1736 struct ir3_instruction *atomic, *src0, *src1;
1737 type_t type = TYPE_U32;
1738
1739 src0 = get_src(ctx, &intr->src[0])[0]; /* offset */
1740 src1 = get_src(ctx, &intr->src[1])[0]; /* value */
1741
1742 switch (intr->intrinsic) {
1743 case nir_intrinsic_shared_atomic_add:
1744 atomic = ir3_ATOMIC_ADD(b, src0, 0, src1, 0);
1745 break;
1746 case nir_intrinsic_shared_atomic_imin:
1747 atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
1748 type = TYPE_S32;
1749 break;
1750 case nir_intrinsic_shared_atomic_umin:
1751 atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
1752 break;
1753 case nir_intrinsic_shared_atomic_imax:
1754 atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
1755 type = TYPE_S32;
1756 break;
1757 case nir_intrinsic_shared_atomic_umax:
1758 atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
1759 break;
1760 case nir_intrinsic_shared_atomic_and:
1761 atomic = ir3_ATOMIC_AND(b, src0, 0, src1, 0);
1762 break;
1763 case nir_intrinsic_shared_atomic_or:
1764 atomic = ir3_ATOMIC_OR(b, src0, 0, src1, 0);
1765 break;
1766 case nir_intrinsic_shared_atomic_xor:
1767 atomic = ir3_ATOMIC_XOR(b, src0, 0, src1, 0);
1768 break;
1769 case nir_intrinsic_shared_atomic_exchange:
1770 atomic = ir3_ATOMIC_XCHG(b, src0, 0, src1, 0);
1771 break;
1772 case nir_intrinsic_shared_atomic_comp_swap:
1773 /* for cmpxchg, src1 is [ui]vec2(data, compare): */
1774 src1 = create_collect(ctx, (struct ir3_instruction*[]){
1775 get_src(ctx, &intr->src[2])[0],
1776 src1,
1777 }, 2);
1778 atomic = ir3_ATOMIC_CMPXCHG(b, src0, 0, src1, 0);
1779 break;
1780 default:
1781 unreachable("boo");
1782 }
1783
1784 atomic->cat6.iim_val = 1;
1785 atomic->cat6.d = 1;
1786 atomic->cat6.type = type;
1787 atomic->barrier_class = IR3_BARRIER_SHARED_W;
1788 atomic->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1789
1790 /* even if nothing consumes the result, we can't DCE the instruction: */
1791 array_insert(b, b->keeps, atomic);
1792
1793 return atomic;
1794 }
1795
1796 /* Images get mapped into SSBO/image state (for store/atomic) and texture
1797 * state block (for load). To simplify things, invert the image id and
1798 * map it from the end of the state block, ie. image 0 becomes num-1,
1799 * image 1 becomes num-2, etc. This potentially avoids needing to re-emit
1800 * texture state when switching shaders.
1801 *
1802 * TODO: is the max # of samplers and SSBOs the same? This shouldn't be
1803 * hard-coded. Also, since all the gl shader stages (ie. everything but CS)
1804 * share the same SSBO/image state block, this might require some more
1805 * logic if we supported images in anything other than FS..
1806 */
1807 static unsigned
1808 get_image_slot(struct ir3_context *ctx, nir_deref_instr *deref)
1809 {
1810 unsigned int loc = 0;
1811 unsigned inner_size = 1;
1812
1813 while (deref->deref_type != nir_deref_type_var) {
1814 assert(deref->deref_type == nir_deref_type_array);
1815 nir_const_value *const_index = nir_src_as_const_value(deref->arr.index);
1816 assert(const_index);
1817
1818 /* Walk up to the parent deref */
1819 deref = nir_deref_instr_parent(deref);
1820
1821 assert(glsl_type_is_array(deref->type));
1822 const unsigned array_len = glsl_get_length(deref->type);
1823 loc += MIN2(const_index->u32[0], array_len - 1) * inner_size;
1824
1825 /* Update the inner size */
1826 inner_size *= array_len;
1827 }
1828
1829 loc += deref->var->data.driver_location;
1830
1831 /* TODO figure out real limit per generation, and don't hardcode: */
1832 const unsigned max_samplers = 16;
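/* e.g. with 16 slots, image 0 maps to slot 15, image 1 to slot 14, etc: */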
1833 return max_samplers - loc - 1;
1834 }
1835
1836 /* see tex_info() for equiv logic for texture instructions.. it would be
1837 * nice if this could be better unified..
1838 */
1839 static unsigned
1840 get_image_coords(const nir_variable *var, unsigned *flagsp)
1841 {
1842 const struct glsl_type *type = glsl_without_array(var->type);
1843 unsigned coords, flags = 0;
1844
1845 switch (glsl_get_sampler_dim(type)) {
1846 case GLSL_SAMPLER_DIM_1D:
1847 case GLSL_SAMPLER_DIM_BUF:
1848 coords = 1;
1849 break;
1850 case GLSL_SAMPLER_DIM_2D:
1851 case GLSL_SAMPLER_DIM_RECT:
1852 case GLSL_SAMPLER_DIM_EXTERNAL:
1853 case GLSL_SAMPLER_DIM_MS:
1854 coords = 2;
1855 break;
1856 case GLSL_SAMPLER_DIM_3D:
1857 case GLSL_SAMPLER_DIM_CUBE:
1858 flags |= IR3_INSTR_3D;
1859 coords = 3;
1860 break;
1861 default:
1862 unreachable("bad sampler dim");
1863 return 0;
1864 }
1865
1866 if (glsl_sampler_type_is_array(type)) {
1867 /* note: unlike tex_info(), adjust # of coords to include array idx: */
1868 coords++;
1869 flags |= IR3_INSTR_A;
1870 }
1871
1872 if (flagsp)
1873 *flagsp = flags;
1874
1875 return coords;
1876 }
1877
1878 static type_t
1879 get_image_type(const nir_variable *var)
1880 {
1881 switch (glsl_get_sampler_result_type(glsl_without_array(var->type))) {
1882 case GLSL_TYPE_UINT:
1883 return TYPE_U32;
1884 case GLSL_TYPE_INT:
1885 return TYPE_S32;
1886 case GLSL_TYPE_FLOAT:
1887 return TYPE_F32;
1888 default:
1889 unreachable("bad sampler type.");
1890 return 0;
1891 }
1892 }
1893
1894 static struct ir3_instruction *
1895 get_image_offset(struct ir3_context *ctx, const nir_variable *var,
1896 struct ir3_instruction * const *coords, bool byteoff)
1897 {
1898 struct ir3_block *b = ctx->block;
1899 struct ir3_instruction *offset;
1900 unsigned ncoords = get_image_coords(var, NULL);
1901
1902 /* to calculate the byte offset (yes, uggg) we need (up to) three
1903 * const values to know the bytes per pixel, and y and z stride:
1904 */
1905 unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
1906 ctx->so->const_layout.image_dims.off[var->data.driver_location];
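/* (per the uses below: cb + 0 holds bytes-per-pixel, cb + 1 the y pitch,
 * cb + 2 the z pitch)
 */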
1907
1908 debug_assert(ctx->so->const_layout.image_dims.mask &
1909 (1 << var->data.driver_location));
1910
1911 /* offset = coords.x * bytes_per_pixel: */
1912 offset = ir3_MUL_S(b, coords[0], 0, create_uniform(ctx, cb + 0), 0);
1913 if (ncoords > 1) {
1914 /* offset += coords.y * y_pitch: */
1915 offset = ir3_MAD_S24(b, create_uniform(ctx, cb + 1), 0,
1916 coords[1], 0, offset, 0);
1917 }
1918 if (ncoords > 2) {
1919 /* offset += coords.z * z_pitch: */
1920 offset = ir3_MAD_S24(b, create_uniform(ctx, cb + 2), 0,
1921 coords[2], 0, offset, 0);
1922 }
1923
1924 if (!byteoff) {
1925 /* Some cases, like atomics, seem to use dword offset instead
1926 * of byte offsets.. blob just puts an extra shr.b in there
1927 * in those cases:
1928 */
1929 offset = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
1930 }
1931
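/* pad out to the 64b (two component) offset that the instructions
 * below expect:
 */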
1932 return create_collect(ctx, (struct ir3_instruction*[]){
1933 offset,
1934 create_immed(b, 0),
1935 }, 2);
1936 }
1937
1938 /* src[] = { deref, coord, sample_index }. const_index[] = {} */
1939 static void
1940 emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1941 struct ir3_instruction **dst)
1942 {
1943 struct ir3_block *b = ctx->block;
1944 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
1945 struct ir3_instruction *sam;
1946 struct ir3_instruction * const *src0 = get_src(ctx, &intr->src[1]);
1947 struct ir3_instruction *coords[4];
1948 unsigned flags, ncoords = get_image_coords(var, &flags);
1949 unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
1950 type_t type = get_image_type(var);
1951
1952 /* hmm, this seems a bit odd, but it is what blob does and (at least
1953 * a5xx) just faults on bogus addresses otherwise:
1954 */
1955 if (flags & IR3_INSTR_3D) {
1956 flags &= ~IR3_INSTR_3D;
1957 flags |= IR3_INSTR_A;
1958 }
1959
1960 for (unsigned i = 0; i < ncoords; i++)
1961 coords[i] = src0[i];
1962
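/* hw doesn't do 1d, so pad 1d image coords out to 2d with a zero'd
 * second coordinate (similar to the 1d case in emit_tex()):
 */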
1963 if (ncoords == 1)
1964 coords[ncoords++] = create_immed(b, 0);
1965
1966 sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags,
1967 tex_idx, tex_idx, create_collect(ctx, coords, ncoords), NULL);
1968
1969 sam->barrier_class = IR3_BARRIER_IMAGE_R;
1970 sam->barrier_conflict = IR3_BARRIER_IMAGE_W;
1971
1972 split_dest(b, dst, sam, 0, 4);
1973 }
1974
1975 /* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
1976 static void
1977 emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1978 {
1979 struct ir3_block *b = ctx->block;
1980 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
1981 struct ir3_instruction *stib, *offset;
1982 struct ir3_instruction * const *value = get_src(ctx, &intr->src[3]);
1983 struct ir3_instruction * const *coords = get_src(ctx, &intr->src[1]);
1984 unsigned ncoords = get_image_coords(var, NULL);
1985 unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
1986
1987 /* src0 is value
1988 * src1 is coords
1989 * src2 is 64b byte offset
1990 */
1991
1992 offset = get_image_offset(ctx, var, coords, true);
1993
1994 /* NOTE: stib seems to take byte offset, but stgb.typed can be used
1995 * too and takes a dword offset.. not quite sure yet why blob uses
1996 * one over the other in various cases.
1997 */
1998
1999 stib = ir3_STIB(b, create_immed(b, tex_idx), 0,
2000 create_collect(ctx, value, 4), 0,
2001 create_collect(ctx, coords, ncoords), 0,
2002 offset, 0);
2003 stib->cat6.iim_val = 4;
2004 stib->cat6.d = ncoords;
2005 stib->cat6.type = get_image_type(var);
2006 stib->cat6.typed = true;
2007 stib->barrier_class = IR3_BARRIER_IMAGE_W;
2008 stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
2009
2010 array_insert(b, b->keeps, stib);
2011 }
2012
2013 static void
2014 emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
2015 struct ir3_instruction **dst)
2016 {
2017 struct ir3_block *b = ctx->block;
2018 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
2019 unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
2020 struct ir3_instruction *sam, *lod;
2021 unsigned flags, ncoords = get_image_coords(var, &flags);
2022
2023 lod = create_immed(b, 0);
2024 sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
2025 tex_idx, tex_idx, lod, NULL);
2026
2027 /* Array size actually ends up in .w rather than .z. This doesn't
2028 * matter for miplevel 0, but for higher mips the value in z is
2029 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
2030 * returned, which means that we have to add 1 to it for arrays for
2031 * a3xx.
2032 *
2033 * Note: use a temporary dst and then copy, since the size of the dst
2034 * array that is passed in is based on nir's understanding of the
2035 * result size, not the hardware's.
2036 */
2037 struct ir3_instruction *tmp[4];
2038
2039 split_dest(b, tmp, sam, 0, 4);
2040
2041 /* get_size instruction returns size in bytes instead of texels
2042 * for imageBuffer, so we need to divide it by the pixel size
2043 * of the image format.
2044 *
2045 * TODO: This is at least true on a5xx. Check other gens.
2046 */
2047 enum glsl_sampler_dim dim =
2048 glsl_get_sampler_dim(glsl_without_array(var->type));
2049 if (dim == GLSL_SAMPLER_DIM_BUF) {
2050 /* Since all the possible values the divisor can take are
2051 * powers of two (4, 8, or 16), the division is implemented
2052 * as a shift-right.
2053 * During shader setup, the log2 of the image format's
2054 * bytes-per-pixel should have been emitted in the 2nd slot of
2055 * image_dims. See ir3_shader::emit_image_dims().
2056 */
2057 unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
2058 ctx->so->const_layout.image_dims.off[var->data.driver_location];
2059 struct ir3_instruction *aux = create_uniform(ctx, cb + 1);
2060
2061 tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0);
2062 }
2063
2064 for (unsigned i = 0; i < ncoords; i++)
2065 dst[i] = tmp[i];
2066
2067 if (flags & IR3_INSTR_A) {
2068 if (ctx->compiler->levels_add_one) {
2069 dst[ncoords-1] = ir3_ADD_U(b, tmp[3], 0, create_immed(b, 1), 0);
2070 } else {
2071 dst[ncoords-1] = ir3_MOV(b, tmp[3], TYPE_U32);
2072 }
2073 }
2074 }
2075
2076 /* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
2077 static struct ir3_instruction *
2078 emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
2079 {
2080 struct ir3_block *b = ctx->block;
2081 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
2082 struct ir3_instruction *atomic, *image, *src0, *src1, *src2;
2083 struct ir3_instruction * const *coords = get_src(ctx, &intr->src[1]);
2084 unsigned ncoords = get_image_coords(var, NULL);
2085
2086 image = create_immed(b, get_image_slot(ctx, nir_src_as_deref(intr->src[0])));
2087
2088 /* src0 is value (or uvec2(value, compare))
2089 * src1 is coords
2090 * src2 is 64b byte offset
2091 */
2092 src0 = get_src(ctx, &intr->src[3])[0];
2093 src1 = create_collect(ctx, coords, ncoords);
2094 src2 = get_image_offset(ctx, var, coords, false);
2095
2096 switch (intr->intrinsic) {
2097 case nir_intrinsic_image_deref_atomic_add:
2098 atomic = ir3_ATOMIC_ADD_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2099 break;
2100 case nir_intrinsic_image_deref_atomic_min:
2101 atomic = ir3_ATOMIC_MIN_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2102 break;
2103 case nir_intrinsic_image_deref_atomic_max:
2104 atomic = ir3_ATOMIC_MAX_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2105 break;
2106 case nir_intrinsic_image_deref_atomic_and:
2107 atomic = ir3_ATOMIC_AND_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2108 break;
2109 case nir_intrinsic_image_deref_atomic_or:
2110 atomic = ir3_ATOMIC_OR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2111 break;
2112 case nir_intrinsic_image_deref_atomic_xor:
2113 atomic = ir3_ATOMIC_XOR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2114 break;
2115 case nir_intrinsic_image_deref_atomic_exchange:
2116 atomic = ir3_ATOMIC_XCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2117 break;
2118 case nir_intrinsic_image_deref_atomic_comp_swap:
2119 /* for cmpxchg, src0 is [ui]vec2(data, compare): */
2120 src0 = create_collect(ctx, (struct ir3_instruction*[]){
2121 get_src(ctx, &intr->src[4])[0],
2122 src0,
2123 }, 2);
2124 atomic = ir3_ATOMIC_CMPXCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2125 break;
2126 default:
2127 unreachable("boo");
2128 }
2129
2130 atomic->cat6.iim_val = 1;
2131 atomic->cat6.d = ncoords;
2132 atomic->cat6.type = get_image_type(var);
2133 atomic->cat6.typed = true;
2134 atomic->barrier_class = IR3_BARRIER_IMAGE_W;
2135 atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
2136
2137 /* even if nothing consumes the result, we can't DCE the instruction: */
2138 array_insert(b, b->keeps, atomic);
2139
2140 return atomic;
2141 }
2142
2143 static void
2144 emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
2145 {
2146 struct ir3_block *b = ctx->block;
2147 struct ir3_instruction *barrier;
2148
2149 switch (intr->intrinsic) {
2150 case nir_intrinsic_barrier:
2151 barrier = ir3_BAR(b);
2152 barrier->cat7.g = true;
2153 barrier->cat7.l = true;
2154 barrier->flags = IR3_INSTR_SS | IR3_INSTR_SY;
2155 barrier->barrier_class = IR3_BARRIER_EVERYTHING;
2156 break;
2157 case nir_intrinsic_memory_barrier:
2158 barrier = ir3_FENCE(b);
2159 barrier->cat7.g = true;
2160 barrier->cat7.r = true;
2161 barrier->cat7.w = true;
2162 barrier->barrier_class = IR3_BARRIER_IMAGE_W |
2163 IR3_BARRIER_BUFFER_W;
2164 barrier->barrier_conflict =
2165 IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
2166 IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
2167 break;
2168 case nir_intrinsic_memory_barrier_atomic_counter:
2169 case nir_intrinsic_memory_barrier_buffer:
2170 barrier = ir3_FENCE(b);
2171 barrier->cat7.g = true;
2172 barrier->cat7.r = true;
2173 barrier->cat7.w = true;
2174 barrier->barrier_class = IR3_BARRIER_BUFFER_W;
2175 barrier->barrier_conflict = IR3_BARRIER_BUFFER_R |
2176 IR3_BARRIER_BUFFER_W;
2177 break;
2178 case nir_intrinsic_memory_barrier_image:
2179 // TODO double check if this should have .g set
2180 barrier = ir3_FENCE(b);
2181 barrier->cat7.g = true;
2182 barrier->cat7.r = true;
2183 barrier->cat7.w = true;
2184 barrier->barrier_class = IR3_BARRIER_IMAGE_W;
2185 barrier->barrier_conflict = IR3_BARRIER_IMAGE_R |
2186 IR3_BARRIER_IMAGE_W;
2187 break;
2188 case nir_intrinsic_memory_barrier_shared:
2189 barrier = ir3_FENCE(b);
2190 barrier->cat7.g = true;
2191 barrier->cat7.l = true;
2192 barrier->cat7.r = true;
2193 barrier->cat7.w = true;
2194 barrier->barrier_class = IR3_BARRIER_SHARED_W;
2195 barrier->barrier_conflict = IR3_BARRIER_SHARED_R |
2196 IR3_BARRIER_SHARED_W;
2197 break;
2198 case nir_intrinsic_group_memory_barrier:
2199 barrier = ir3_FENCE(b);
2200 barrier->cat7.g = true;
2201 barrier->cat7.l = true;
2202 barrier->cat7.r = true;
2203 barrier->cat7.w = true;
2204 barrier->barrier_class = IR3_BARRIER_SHARED_W |
2205 IR3_BARRIER_IMAGE_W |
2206 IR3_BARRIER_BUFFER_W;
2207 barrier->barrier_conflict =
2208 IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W |
2209 IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
2210 IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
2211 break;
2212 default:
2213 unreachable("boo");
2214 }
2215
2216 /* make sure barrier doesn't get DCE'd */
2217 array_insert(b, b->keeps, barrier);
2218 }
2219
2220 static void add_sysval_input_compmask(struct ir3_context *ctx,
2221 gl_system_value slot, unsigned compmask,
2222 struct ir3_instruction *instr)
2223 {
2224 struct ir3_shader_variant *so = ctx->so;
2225 unsigned r = regid(so->inputs_count, 0);
2226 unsigned n = so->inputs_count++;
2227
2228 so->inputs[n].sysval = true;
2229 so->inputs[n].slot = slot;
2230 so->inputs[n].compmask = compmask;
2231 so->inputs[n].regid = r;
2232 so->inputs[n].interpolate = INTERP_MODE_FLAT;
2233 so->total_in++;
2234
2235 ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1);
2236 ctx->ir->inputs[r] = instr;
2237 }
2238
2239 static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot,
2240 struct ir3_instruction *instr)
2241 {
2242 add_sysval_input_compmask(ctx, slot, 0x1, instr);
2243 }
2244
2245 static void
2246 emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
2247 {
2248 const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
2249 struct ir3_instruction **dst;
2250 struct ir3_instruction * const *src;
2251 struct ir3_block *b = ctx->block;
2252 nir_const_value *const_offset;
2253 int idx, comp;
2254
2255 if (info->has_dest) {
2256 unsigned n = nir_intrinsic_dest_components(intr);
2257 dst = get_dst(ctx, &intr->dest, n);
2258 } else {
2259 dst = NULL;
2260 }
2261
2262 switch (intr->intrinsic) {
2263 case nir_intrinsic_load_uniform:
2264 idx = nir_intrinsic_base(intr);
2265 const_offset = nir_src_as_const_value(intr->src[0]);
2266 if (const_offset) {
2267 idx += const_offset->u32[0];
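/* uniforms are vec4 aligned, so the flat scalar index is
 * (vec4 slot * 4) + component:
 */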
2268 for (int i = 0; i < intr->num_components; i++) {
2269 unsigned n = idx * 4 + i;
2270 dst[i] = create_uniform(ctx, n);
2271 }
2272 } else {
2273 src = get_src(ctx, &intr->src[0]);
2274 for (int i = 0; i < intr->num_components; i++) {
2275 int n = idx * 4 + i;
2276 dst[i] = create_uniform_indirect(ctx, n,
2277 get_addr(ctx, src[0], 4));
2278 }
2279 /* NOTE: if relative addressing is used, we set
2280 * constlen in the compiler (to worst-case value)
2281 * since we don't know in the assembler what the max
2282 * addr reg value can be:
2283 */
2284 ctx->so->constlen = ctx->s->num_uniforms;
2285 }
2286 break;
2287 case nir_intrinsic_load_ubo:
2288 emit_intrinsic_load_ubo(ctx, intr, dst);
2289 break;
2290 case nir_intrinsic_load_input:
2291 idx = nir_intrinsic_base(intr);
2292 comp = nir_intrinsic_component(intr);
2293 const_offset = nir_src_as_const_value(intr->src[0]);
2294 if (const_offset) {
2295 idx += const_offset->u32[0];
2296 for (int i = 0; i < intr->num_components; i++) {
2297 unsigned n = idx * 4 + i + comp;
2298 dst[i] = ctx->ir->inputs[n];
2299 }
2300 } else {
2301 src = get_src(ctx, &intr->src[0]);
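/* for indirect addressing, gather all the inputs into a single
 * collect and index into that relative to the address register:
 */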
2302 struct ir3_instruction *collect =
2303 create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs);
2304 struct ir3_instruction *addr = get_addr(ctx, src[0], 4);
2305 for (int i = 0; i < intr->num_components; i++) {
2306 unsigned n = idx * 4 + i + comp;
2307 dst[i] = create_indirect_load(ctx, ctx->ir->ninputs,
2308 n, addr, collect);
2309 }
2310 }
2311 break;
2312 case nir_intrinsic_load_ssbo:
2313 emit_intrinsic_load_ssbo(ctx, intr, dst);
2314 break;
2315 case nir_intrinsic_store_ssbo:
2316 emit_intrinsic_store_ssbo(ctx, intr);
2317 break;
2318 case nir_intrinsic_get_buffer_size:
2319 emit_intrinsic_ssbo_size(ctx, intr, dst);
2320 break;
2321 case nir_intrinsic_ssbo_atomic_add:
2322 case nir_intrinsic_ssbo_atomic_imin:
2323 case nir_intrinsic_ssbo_atomic_umin:
2324 case nir_intrinsic_ssbo_atomic_imax:
2325 case nir_intrinsic_ssbo_atomic_umax:
2326 case nir_intrinsic_ssbo_atomic_and:
2327 case nir_intrinsic_ssbo_atomic_or:
2328 case nir_intrinsic_ssbo_atomic_xor:
2329 case nir_intrinsic_ssbo_atomic_exchange:
2330 case nir_intrinsic_ssbo_atomic_comp_swap:
2331 dst[0] = emit_intrinsic_atomic_ssbo(ctx, intr);
2332 break;
2333 case nir_intrinsic_load_shared:
2334 emit_intrinsic_load_shared(ctx, intr, dst);
2335 break;
2336 case nir_intrinsic_store_shared:
2337 emit_intrinsic_store_shared(ctx, intr);
2338 break;
2339 case nir_intrinsic_shared_atomic_add:
2340 case nir_intrinsic_shared_atomic_imin:
2341 case nir_intrinsic_shared_atomic_umin:
2342 case nir_intrinsic_shared_atomic_imax:
2343 case nir_intrinsic_shared_atomic_umax:
2344 case nir_intrinsic_shared_atomic_and:
2345 case nir_intrinsic_shared_atomic_or:
2346 case nir_intrinsic_shared_atomic_xor:
2347 case nir_intrinsic_shared_atomic_exchange:
2348 case nir_intrinsic_shared_atomic_comp_swap:
2349 dst[0] = emit_intrinsic_atomic_shared(ctx, intr);
2350 break;
2351 case nir_intrinsic_image_deref_load:
2352 emit_intrinsic_load_image(ctx, intr, dst);
2353 break;
2354 case nir_intrinsic_image_deref_store:
2355 emit_intrinsic_store_image(ctx, intr);
2356 break;
2357 case nir_intrinsic_image_deref_size:
2358 emit_intrinsic_image_size(ctx, intr, dst);
2359 break;
2360 case nir_intrinsic_image_deref_atomic_add:
2361 case nir_intrinsic_image_deref_atomic_min:
2362 case nir_intrinsic_image_deref_atomic_max:
2363 case nir_intrinsic_image_deref_atomic_and:
2364 case nir_intrinsic_image_deref_atomic_or:
2365 case nir_intrinsic_image_deref_atomic_xor:
2366 case nir_intrinsic_image_deref_atomic_exchange:
2367 case nir_intrinsic_image_deref_atomic_comp_swap:
2368 dst[0] = emit_intrinsic_atomic_image(ctx, intr);
2369 break;
2370 case nir_intrinsic_barrier:
2371 case nir_intrinsic_memory_barrier:
2372 case nir_intrinsic_group_memory_barrier:
2373 case nir_intrinsic_memory_barrier_atomic_counter:
2374 case nir_intrinsic_memory_barrier_buffer:
2375 case nir_intrinsic_memory_barrier_image:
2376 case nir_intrinsic_memory_barrier_shared:
2377 emit_intrinsic_barrier(ctx, intr);
2378 /* note that the blk ptr is no longer valid, make that obvious: */
2379 b = NULL;
2380 break;
2381 case nir_intrinsic_store_output:
2382 idx = nir_intrinsic_base(intr);
2383 comp = nir_intrinsic_component(intr);
2384 const_offset = nir_src_as_const_value(intr->src[1]);
2385 compile_assert(ctx, const_offset != NULL);
2386 idx += const_offset->u32[0];
2387
2388 src = get_src(ctx, &intr->src[0]);
2389 for (int i = 0; i < intr->num_components; i++) {
2390 unsigned n = idx * 4 + i + comp;
2391 ctx->ir->outputs[n] = src[i];
2392 }
2393 break;
2394 case nir_intrinsic_load_base_vertex:
2395 case nir_intrinsic_load_first_vertex:
2396 if (!ctx->basevertex) {
2397 ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE);
2398 add_sysval_input(ctx, SYSTEM_VALUE_FIRST_VERTEX, ctx->basevertex);
2399 }
2400 dst[0] = ctx->basevertex;
2401 break;
2402 case nir_intrinsic_load_vertex_id_zero_base:
2403 case nir_intrinsic_load_vertex_id:
2404 if (!ctx->vertex_id) {
2405 gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ?
2406 SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
2407 ctx->vertex_id = create_input(ctx, 0);
2408 add_sysval_input(ctx, sv, ctx->vertex_id);
2409 }
2410 dst[0] = ctx->vertex_id;
2411 break;
2412 case nir_intrinsic_load_instance_id:
2413 if (!ctx->instance_id) {
2414 ctx->instance_id = create_input(ctx, 0);
2415 add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID,
2416 ctx->instance_id);
2417 }
2418 dst[0] = ctx->instance_id;
2419 break;
2420 case nir_intrinsic_load_sample_id:
2421 case nir_intrinsic_load_sample_id_no_per_sample:
2422 if (!ctx->samp_id) {
2423 ctx->samp_id = create_input(ctx, 0);
2424 ctx->samp_id->regs[0]->flags |= IR3_REG_HALF;
2425 add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID,
2426 ctx->samp_id);
2427 }
2428 dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32);
2429 break;
2430 case nir_intrinsic_load_sample_mask_in:
2431 if (!ctx->samp_mask_in) {
2432 ctx->samp_mask_in = create_input(ctx, 0);
2433 add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN,
2434 ctx->samp_mask_in);
2435 }
2436 dst[0] = ctx->samp_mask_in;
2437 break;
2438 case nir_intrinsic_load_user_clip_plane:
2439 idx = nir_intrinsic_ucp_id(intr);
2440 for (int i = 0; i < intr->num_components; i++) {
2441 unsigned n = idx * 4 + i;
2442 dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
2443 }
2444 break;
2445 case nir_intrinsic_load_front_face:
2446 if (!ctx->frag_face) {
2447 ctx->so->frag_face = true;
2448 ctx->frag_face = create_input(ctx, 0);
2449 add_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, ctx->frag_face);
2450 ctx->frag_face->regs[0]->flags |= IR3_REG_HALF;
2451 }
2452 /* for fragface, we get -1 for back and 0 for front. However this is
2453 * the inverse of what nir expects (where ~0 is true).
2454 */
2455 dst[0] = ir3_COV(b, ctx->frag_face, TYPE_S16, TYPE_S32);
2456 dst[0] = ir3_NOT_B(b, dst[0], 0);
2457 break;
2458 case nir_intrinsic_load_local_invocation_id:
2459 if (!ctx->local_invocation_id) {
2460 ctx->local_invocation_id = create_input_compmask(ctx, 0, 0x7);
2461 add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID,
2462 0x7, ctx->local_invocation_id);
2463 }
2464 split_dest(b, dst, ctx->local_invocation_id, 0, 3);
2465 break;
2466 case nir_intrinsic_load_work_group_id:
2467 if (!ctx->work_group_id) {
2468 ctx->work_group_id = create_input_compmask(ctx, 0, 0x7);
2469 add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID,
2470 0x7, ctx->work_group_id);
2471 ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH;
2472 }
2473 split_dest(b, dst, ctx->work_group_id, 0, 3);
2474 break;
2475 case nir_intrinsic_load_num_work_groups:
2476 for (int i = 0; i < intr->num_components; i++) {
2477 dst[i] = create_driver_param(ctx, IR3_DP_NUM_WORK_GROUPS_X + i);
2478 }
2479 break;
2480 case nir_intrinsic_load_local_group_size:
2481 for (int i = 0; i < intr->num_components; i++) {
2482 dst[i] = create_driver_param(ctx, IR3_DP_LOCAL_GROUP_SIZE_X + i);
2483 }
2484 break;
2485 case nir_intrinsic_discard_if:
2486 case nir_intrinsic_discard: {
2487 struct ir3_instruction *cond, *kill;
2488
2489 if (intr->intrinsic == nir_intrinsic_discard_if) {
2490 /* conditional discard: */
2491 src = get_src(ctx, &intr->src[0]);
2492 cond = ir3_b2n(b, src[0]);
2493 } else {
2494 /* unconditional discard: */
2495 cond = create_immed(b, 1);
2496 }
2497
2498 /* NOTE: only cmps.*.* can write p0.x: */
2499 cond = ir3_CMPS_S(b, cond, 0, create_immed(b, 0), 0);
2500 cond->cat2.condition = IR3_COND_NE;
2501
2502 /* condition always goes in predicate register: */
2503 cond->regs[0]->num = regid(REG_P0, 0);
2504
2505 kill = ir3_KILL(b, cond, 0);
2506 array_insert(ctx->ir, ctx->ir->predicates, kill);
2507
2508 array_insert(b, b->keeps, kill);
2509 ctx->so->has_kill = true;
2510
2511 break;
2512 }
2513 default:
2514 compile_error(ctx, "Unhandled intrinsic type: %s\n",
2515 nir_intrinsic_infos[intr->intrinsic].name);
2516 break;
2517 }
2518
2519 if (info->has_dest)
2520 put_dst(ctx, &intr->dest);
2521 }
2522
2523 static void
2524 emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
2525 {
2526 struct ir3_instruction **dst = get_dst_ssa(ctx, &instr->def,
2527 instr->def.num_components);
2528 type_t type = (instr->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;
2529
2530 for (int i = 0; i < instr->def.num_components; i++)
2531 dst[i] = create_immed_typed(ctx->block, instr->value.u32[i], type);
2532 }
2533
2534 static void
2535 emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
2536 {
2537 struct ir3_instruction **dst = get_dst_ssa(ctx, &undef->def,
2538 undef->def.num_components);
2539 type_t type = (undef->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;
2540
2541 /* backend doesn't want undefined instructions, so just plug
2542 * in 0.0..
2543 */
2544 for (int i = 0; i < undef->def.num_components; i++)
2545 dst[i] = create_immed_typed(ctx->block, fui(0.0), type);
2546 }
2547
2548 /*
2549 * texture fetch/sample instructions:
2550 */
2551
2552 static void
2553 tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
2554 {
2555 unsigned coords, flags = 0;
2556
2557 /* note: would use tex->coord_components.. except txs.. also,
2558 * since array index goes after shadow ref, we don't want to
2559 * count it:
2560 */
2561 switch (tex->sampler_dim) {
2562 case GLSL_SAMPLER_DIM_1D:
2563 case GLSL_SAMPLER_DIM_BUF:
2564 coords = 1;
2565 break;
2566 case GLSL_SAMPLER_DIM_2D:
2567 case GLSL_SAMPLER_DIM_RECT:
2568 case GLSL_SAMPLER_DIM_EXTERNAL:
2569 case GLSL_SAMPLER_DIM_MS:
2570 coords = 2;
2571 break;
2572 case GLSL_SAMPLER_DIM_3D:
2573 case GLSL_SAMPLER_DIM_CUBE:
2574 coords = 3;
2575 flags |= IR3_INSTR_3D;
2576 break;
2577 default:
2578 unreachable("bad sampler_dim");
2579 }
2580
2581 if (tex->is_shadow && tex->op != nir_texop_lod)
2582 flags |= IR3_INSTR_S;
2583
2584 if (tex->is_array && tex->op != nir_texop_lod)
2585 flags |= IR3_INSTR_A;
2586
2587 *flagsp = flags;
2588 *coordsp = coords;
2589 }
2590
2591 static void
2592 emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
2593 {
2594 struct ir3_block *b = ctx->block;
2595 struct ir3_instruction **dst, *sam, *src0[12], *src1[4];
2596 struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
2597 struct ir3_instruction *lod, *compare, *proj, *sample_index;
2598 bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
2599 unsigned i, coords, flags;
2600 unsigned nsrc0 = 0, nsrc1 = 0;
2601 type_t type;
2602 opc_t opc = 0;
2603
2604 coord = off = ddx = ddy = NULL;
2605 lod = proj = compare = sample_index = NULL;
2606
2607 /* TODO: might just be one component for gathers? */
2608 dst = get_dst(ctx, &tex->dest, 4);
2609
2610 for (unsigned i = 0; i < tex->num_srcs; i++) {
2611 switch (tex->src[i].src_type) {
2612 case nir_tex_src_coord:
2613 coord = get_src(ctx, &tex->src[i].src);
2614 break;
2615 case nir_tex_src_bias:
2616 lod = get_src(ctx, &tex->src[i].src)[0];
2617 has_bias = true;
2618 break;
2619 case nir_tex_src_lod:
2620 lod = get_src(ctx, &tex->src[i].src)[0];
2621 has_lod = true;
2622 break;
2623 case nir_tex_src_comparator: /* shadow comparator */
2624 compare = get_src(ctx, &tex->src[i].src)[0];
2625 break;
2626 case nir_tex_src_projector:
2627 proj = get_src(ctx, &tex->src[i].src)[0];
2628 has_proj = true;
2629 break;
2630 case nir_tex_src_offset:
2631 off = get_src(ctx, &tex->src[i].src);
2632 has_off = true;
2633 break;
2634 case nir_tex_src_ddx:
2635 ddx = get_src(ctx, &tex->src[i].src);
2636 break;
2637 case nir_tex_src_ddy:
2638 ddy = get_src(ctx, &tex->src[i].src);
2639 break;
2640 case nir_tex_src_ms_index:
2641 sample_index = get_src(ctx, &tex->src[i].src)[0];
2642 break;
2643 default:
2644 compile_error(ctx, "Unhandled NIR tex src type: %d\n",
2645 tex->src[i].src_type);
2646 return;
2647 }
2648 }
2649
2650 switch (tex->op) {
2651 case nir_texop_tex: opc = has_lod ? OPC_SAML : OPC_SAM; break;
2652 case nir_texop_txb: opc = OPC_SAMB; break;
2653 case nir_texop_txl: opc = OPC_SAML; break;
2654 case nir_texop_txd: opc = OPC_SAMGQ; break;
2655 case nir_texop_txf: opc = OPC_ISAML; break;
2656 case nir_texop_lod: opc = OPC_GETLOD; break;
2657 case nir_texop_tg4:
2658 /* NOTE: a4xx might need to emulate gather w/ txf (this is
2659 * what blob does, seems gather is broken?), and a3xx did
2660 * not support it (but probably could also emulate).
2661 */
2662 switch (tex->component) {
2663 case 0: opc = OPC_GATHER4R; break;
2664 case 1: opc = OPC_GATHER4G; break;
2665 case 2: opc = OPC_GATHER4B; break;
2666 case 3: opc = OPC_GATHER4A; break;
2667 }
2668 break;
2669 case nir_texop_txf_ms: opc = OPC_ISAMM; break;
2670 case nir_texop_txs:
2671 case nir_texop_query_levels:
2672 case nir_texop_texture_samples:
2673 case nir_texop_samples_identical:
2674 case nir_texop_txf_ms_mcs:
2675 compile_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
2676 return;
2677 }
2678
2679 tex_info(tex, &flags, &coords);
2680
2681 /*
2682 * lay out the first argument in the proper order:
2683 * - actual coordinates first
2684 * - shadow reference
2685 * - array index
2686 * - projection w
2687 * - starting at offset 4, dpdx.xy, dpdy.xy
2688 *
2689 * bias/lod go into the second arg
2690 */
2691
2692 /* insert tex coords: */
2693 for (i = 0; i < coords; i++)
2694 src0[i] = coord[i];
2695
2696 nsrc0 = i;
2697
2698 /* NOTE a3xx (and possibly a4xx?) might be different, using isaml
2699 * with scaled x coord according to requested sample:
2700 */
2701 if (tex->op == nir_texop_txf_ms) {
2702 if (ctx->compiler->txf_ms_with_isaml) {
2703 /* the samples are laid out in x dimension as
2704 * 0 1 2 3
2705 * x_ms = (x << ms) + sample_index;
2706 */
2707 struct ir3_instruction *ms;
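/* ctx->samples packs a 2-bit log2(sample count) per texture, which
 * becomes the shift amount below:
 */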
2708 ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);
2709
2710 src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
2711 src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);
2712
2713 opc = OPC_ISAML;
2714 } else {
2715 src0[nsrc0++] = sample_index;
2716 }
2717 }
2718
2719 /* scale up integer coords for TXF based on the LOD */
2720 if (ctx->compiler->unminify_coords && (opc == OPC_ISAML)) {
2721 assert(has_lod);
2722 for (i = 0; i < coords; i++)
2723 src0[i] = ir3_SHL_B(b, src0[i], 0, lod, 0);
2724 }
2725
2726 if (coords == 1) {
2727 /* hw doesn't do 1d, so we treat it as 2d with
2728 * height of 1, and patch up the y coord.
2729 * TODO: y coord should be (int)0 in some cases..
2730 */
2731 src0[nsrc0++] = create_immed(b, fui(0.5));
2732 }
2733
2734 if (tex->is_shadow && tex->op != nir_texop_lod)
2735 src0[nsrc0++] = compare;
2736
2737 if (tex->is_array && tex->op != nir_texop_lod) {
2738 struct ir3_instruction *idx = coord[coords];
2739
2740 /* the array coord for cube arrays needs 0.5 added to it */
2741 if (ctx->compiler->array_index_add_half && (opc != OPC_ISAML))
2742 idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);
2743
2744 src0[nsrc0++] = idx;
2745 }
2746
2747 if (has_proj) {
2748 src0[nsrc0++] = proj;
2749 flags |= IR3_INSTR_P;
2750 }
2751
2752 /* pad to 4, then ddx/ddy: */
2753 if (tex->op == nir_texop_txd) {
2754 while (nsrc0 < 4)
2755 src0[nsrc0++] = create_immed(b, fui(0.0));
2756 for (i = 0; i < coords; i++)
2757 src0[nsrc0++] = ddx[i];
2758 if (coords < 2)
2759 src0[nsrc0++] = create_immed(b, fui(0.0));
2760 for (i = 0; i < coords; i++)
2761 src0[nsrc0++] = ddy[i];
2762 if (coords < 2)
2763 src0[nsrc0++] = create_immed(b, fui(0.0));
2764 }
2765
2766 /*
2767 * second argument (if applicable):
2768 * - offsets
2769 * - lod
2770 * - bias
2771 */
2772 if (has_off | has_lod | has_bias) {
2773 if (has_off) {
2774 unsigned off_coords = coords;
2775 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
2776 off_coords--;
2777 for (i = 0; i < off_coords; i++)
2778 src1[nsrc1++] = off[i];
2779 if (off_coords < 2)
2780 src1[nsrc1++] = create_immed(b, fui(0.0));
2781 flags |= IR3_INSTR_O;
2782 }
2783
2784 if (has_lod | has_bias)
2785 src1[nsrc1++] = lod;
2786 }
2787
2788 switch (tex->dest_type) {
2789 case nir_type_invalid:
2790 case nir_type_float:
2791 type = TYPE_F32;
2792 break;
2793 case nir_type_int:
2794 type = TYPE_S32;
2795 break;
2796 case nir_type_uint:
2797 case nir_type_bool:
2798 type = TYPE_U32;
2799 break;
2800 default:
2801 unreachable("bad dest_type");
2802 }
2803
2804 if (opc == OPC_GETLOD)
2805 type = TYPE_U32;
2806
2807 unsigned tex_idx = tex->texture_index;
2808
2809 ctx->max_texture_index = MAX2(ctx->max_texture_index, tex_idx);
2810
2811 struct ir3_instruction *col0 = create_collect(ctx, src0, nsrc0);
2812 struct ir3_instruction *col1 = create_collect(ctx, src1, nsrc1);
2813
2814 sam = ir3_SAM(b, opc, type, 0b1111, flags,
2815 tex_idx, tex_idx, col0, col1);
2816
2817 if ((ctx->astc_srgb & (1 << tex_idx)) && !nir_tex_instr_is_query(tex)) {
2818 /* only need first 3 components: */
2819 sam->regs[0]->wrmask = 0x7;
2820 split_dest(b, dst, sam, 0, 3);
2821
2822 /* we need to sample the alpha separately with a non-ASTC
2823 * texture state:
2824 */
2825 sam = ir3_SAM(b, opc, type, 0b1000, flags,
2826 tex_idx, tex_idx, col0, col1);
2827
2828 array_insert(ctx->ir, ctx->ir->astc_srgb, sam);
2829
2830 /* fixup .w component: */
2831 split_dest(b, &dst[3], sam, 3, 1);
2832 } else {
2833 /* normal (non-workaround) case: */
2834 split_dest(b, dst, sam, 0, 4);
2835 }
2836
2837 /* GETLOD returns results in 4.8 fixed point */
2838 if (opc == OPC_GETLOD) {
2839 struct ir3_instruction *factor = create_immed(b, fui(1.0 / 256));
2840
2841 compile_assert(ctx, tex->dest_type == nir_type_float);
2842 for (i = 0; i < 2; i++) {
2843 dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_U32, TYPE_F32), 0,
2844 factor, 0);
2845 }
2846 }
2847
2848 put_dst(ctx, &tex->dest);
2849 }
2850
2851 static void
2852 emit_tex_query_levels(struct ir3_context *ctx, nir_tex_instr *tex)
2853 {
2854 struct ir3_block *b = ctx->block;
2855 struct ir3_instruction **dst, *sam;
2856
2857 dst = get_dst(ctx, &tex->dest, 1);
2858
2859 sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 0b0100, 0,
2860 tex->texture_index, tex->texture_index, NULL, NULL);
2861
2862 /* even though there is only one component, since it ends
2863 * up in .z rather than .x, we need a split_dest()
2864 */
2865 split_dest(b, dst, sam, 0, 3);
2866
2867 /* The # of levels comes from getinfo.z. We need to add 1 to it, since
2868 * the value in TEX_CONST_0 is zero-based.
2869 */
2870 if (ctx->compiler->levels_add_one)
2871 dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);
2872
2873 put_dst(ctx, &tex->dest);
2874 }
2875
2876 static void
2877 emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
2878 {
2879 struct ir3_block *b = ctx->block;
2880 struct ir3_instruction **dst, *sam;
2881 struct ir3_instruction *lod;
2882 unsigned flags, coords;
2883
2884 tex_info(tex, &flags, &coords);
2885
2886 /* Actually we want the number of dimensions, not coordinates. This
2887 * distinction only matters for cubes.
2888 */
2889 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
2890 coords = 2;
2891
2892 dst = get_dst(ctx, &tex->dest, 4);
2893
2894 compile_assert(ctx, tex->num_srcs == 1);
2895 compile_assert(ctx, tex->src[0].src_type == nir_tex_src_lod);
2896
2897 lod = get_src(ctx, &tex->src[0].src)[0];
2898
2899 sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
2900 tex->texture_index, tex->texture_index, lod, NULL);
2901
2902 split_dest(b, dst, sam, 0, 4);
2903
2904 /* Array size actually ends up in .w rather than .z. This doesn't
2905 * matter for miplevel 0, but for higher mips the value in z is
2906 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
2907 * returned, which means that we have to add 1 to it for arrays.
2908 */
2909 if (tex->is_array) {
2910 if (ctx->compiler->levels_add_one) {
2911 dst[coords] = ir3_ADD_U(b, dst[3], 0, create_immed(b, 1), 0);
2912 } else {
2913 dst[coords] = ir3_MOV(b, dst[3], TYPE_U32);
2914 }
2915 }
2916
2917 put_dst(ctx, &tex->dest);
2918 }
2919
2920 static void
2921 emit_jump(struct ir3_context *ctx, nir_jump_instr *jump)
2922 {
2923 switch (jump->type) {
2924 case nir_jump_break:
2925 case nir_jump_continue:
2926 case nir_jump_return:
2927 /* I *think* we can simply just ignore this, and use the
2928 * successor block link to figure out where we need to
2929 * jump to for break/continue
2930 */
2931 break;
2932 default:
2933 compile_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
2934 break;
2935 }
2936 }
2937
2938 static void
2939 emit_instr(struct ir3_context *ctx, nir_instr *instr)
2940 {
2941 switch (instr->type) {
2942 case nir_instr_type_alu:
2943 emit_alu(ctx, nir_instr_as_alu(instr));
2944 break;
2945 case nir_instr_type_deref:
2946 /* ignored, handled as part of the intrinsic they are src to */
2947 break;
2948 case nir_instr_type_intrinsic:
2949 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2950 break;
2951 case nir_instr_type_load_const:
2952 emit_load_const(ctx, nir_instr_as_load_const(instr));
2953 break;
2954 case nir_instr_type_ssa_undef:
2955 emit_undef(ctx, nir_instr_as_ssa_undef(instr));
2956 break;
2957 case nir_instr_type_tex: {
2958 nir_tex_instr *tex = nir_instr_as_tex(instr);
2959 /* a couple of tex instructions get special-cased:
2960 */
2961 switch (tex->op) {
2962 case nir_texop_txs:
2963 emit_tex_txs(ctx, tex);
2964 break;
2965 case nir_texop_query_levels:
2966 emit_tex_query_levels(ctx, tex);
2967 break;
2968 default:
2969 emit_tex(ctx, tex);
2970 break;
2971 }
2972 break;
2973 }
2974 case nir_instr_type_jump:
2975 emit_jump(ctx, nir_instr_as_jump(instr));
2976 break;
2977 case nir_instr_type_phi:
2978 /* we have converted phi webs to regs in NIR by now */
2979 compile_error(ctx, "Unexpected NIR instruction type: %d\n", instr->type);
2980 break;
2981 case nir_instr_type_call:
2982 case nir_instr_type_parallel_copy:
2983 compile_error(ctx, "Unhandled NIR instruction type: %d\n", instr->type);
2984 break;
2985 }
2986 }
2987
2988 static struct ir3_block *
2989 get_block(struct ir3_context *ctx, const nir_block *nblock)
2990 {
2991 struct ir3_block *block;
2992 struct hash_entry *hentry;
2993 unsigned i;
2994
2995 hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
2996 if (hentry)
2997 return hentry->data;
2998
2999 block = ir3_block_create(ctx->ir);
3000 block->nblock = nblock;
3001 _mesa_hash_table_insert(ctx->block_ht, nblock, block);
3002
3003 block->predecessors_count = nblock->predecessors->entries;
3004 block->predecessors = ralloc_array_size(block,
3005 sizeof(block->predecessors[0]), block->predecessors_count);
3006 i = 0;
3007 set_foreach(nblock->predecessors, sentry) {
3008 block->predecessors[i++] = get_block(ctx, sentry->key);
3009 }
3010
3011 return block;
3012 }
3013
3014 static void
3015 emit_block(struct ir3_context *ctx, nir_block *nblock)
3016 {
3017 struct ir3_block *block = get_block(ctx, nblock);
3018
3019 for (int i = 0; i < ARRAY_SIZE(block->successors); i++) {
3020 if (nblock->successors[i]) {
3021 block->successors[i] =
3022 get_block(ctx, nblock->successors[i]);
3023 }
3024 }
3025
3026 ctx->block = block;
3027 list_addtail(&block->node, &ctx->ir->block_list);
3028
3029 /* re-emit addr register in each block if needed: */
3030 for (int i = 0; i < ARRAY_SIZE(ctx->addr_ht); i++) {
3031 _mesa_hash_table_destroy(ctx->addr_ht[i], NULL);
3032 ctx->addr_ht[i] = NULL;
3033 }
3034
3035 nir_foreach_instr(instr, nblock) {
3036 ctx->cur_instr = instr;
3037 emit_instr(ctx, instr);
3038 ctx->cur_instr = NULL;
3039 if (ctx->error)
3040 return;
3041 }
3042 }
3043
3044 static void emit_cf_list(struct ir3_context *ctx, struct exec_list *list);
3045
3046 static void
3047 emit_if(struct ir3_context *ctx, nir_if *nif)
3048 {
3049 struct ir3_instruction *condition = get_src(ctx, &nif->condition)[0];
3050
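/* the condition goes on the block that ends before the if, since it
 * selects which of the two successor paths to take (same idea as in
 * emit_stream_out()):
 */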
3051 ctx->block->condition =
3052 get_predicate(ctx, ir3_b2n(condition->block, condition));
3053
3054 emit_cf_list(ctx, &nif->then_list);
3055 emit_cf_list(ctx, &nif->else_list);
3056 }
3057
3058 static void
3059 emit_loop(struct ir3_context *ctx, nir_loop *nloop)
3060 {
3061 emit_cf_list(ctx, &nloop->body);
3062 }
3063
3064 static void
3065 emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
3066 {
3067 foreach_list_typed(nir_cf_node, node, node, list) {
3068 switch (node->type) {
3069 case nir_cf_node_block:
3070 emit_block(ctx, nir_cf_node_as_block(node));
3071 break;
3072 case nir_cf_node_if:
3073 emit_if(ctx, nir_cf_node_as_if(node));
3074 break;
3075 case nir_cf_node_loop:
3076 emit_loop(ctx, nir_cf_node_as_loop(node));
3077 break;
3078 case nir_cf_node_function:
3079 compile_error(ctx, "TODO\n");
3080 break;
3081 }
3082 }
3083 }
3084
3085 /* emit stream-out code. At this point, the current block is the original
3086 * (nir) end block, and nir ensures that all flow control paths terminate
3087 * into the end block. We re-purpose the original end block to generate
3088 * the 'if (vtxcnt < maxvtxcnt)' condition, then append the conditional
3089 * block holding stream-out write instructions, followed by the new end
3090 * block:
3091 *
3092 * blockOrigEnd {
3093 * p0.x = (vtxcnt < maxvtxcnt)
3094 * // succs: blockStreamOut, blockNewEnd
3095 * }
3096 * blockStreamOut {
3097 * ... stream-out instructions ...
3098 * // succs: blockNewEnd
3099 * }
3100 * blockNewEnd {
3101 * }
3102 */
3103 static void
3104 emit_stream_out(struct ir3_context *ctx)
3105 {
3106 struct ir3_shader_variant *v = ctx->so;
3107 struct ir3 *ir = ctx->ir;
3108 struct ir3_stream_output_info *strmout =
3109 &ctx->so->shader->stream_output;
3110 struct ir3_block *orig_end_block, *stream_out_block, *new_end_block;
3111 struct ir3_instruction *vtxcnt, *maxvtxcnt, *cond;
3112 struct ir3_instruction *bases[IR3_MAX_SO_BUFFERS];
3113
3114 /* create vtxcnt input in input block at top of shader,
3115 * so that it is seen as live over the entire duration
3116 * of the shader:
3117 */
3118 vtxcnt = create_input(ctx, 0);
3119 add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt);
3120
3121 maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX);
3122
3123 /* at this point, we are at the original 'end' block,
3124 * re-purpose this block to stream-out condition, then
3125 * append stream-out block and new-end block
3126 */
3127 orig_end_block = ctx->block;
3128
3129 // TODO these blocks need to update predecessors..
3130 // maybe w/ store_global intrinsic, we could do this
3131 // stuff in nir->nir pass
3132
3133 stream_out_block = ir3_block_create(ir);
3134 list_addtail(&stream_out_block->node, &ir->block_list);
3135
3136 new_end_block = ir3_block_create(ir);
3137 list_addtail(&new_end_block->node, &ir->block_list);
3138
3139 orig_end_block->successors[0] = stream_out_block;
3140 orig_end_block->successors[1] = new_end_block;
3141 stream_out_block->successors[0] = new_end_block;
3142
3143 /* setup 'if (vtxcnt < maxvtxcnt)' condition: */
3144 cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0);
3145 cond->regs[0]->num = regid(REG_P0, 0);
3146 cond->cat2.condition = IR3_COND_LT;
3147
3148 /* condition goes on previous block to the conditional,
3149 * since it is used to pick which of the two successor
3150 * paths to take:
3151 */
3152 orig_end_block->condition = cond;
3153
3154 /* switch to stream_out_block to generate the stream-out
3155 * instructions:
3156 */
3157 ctx->block = stream_out_block;
3158
3159 /* Calculate base addresses based on vtxcnt. Instructions
3160 * generated for bases not used in following loop will be
3161 * stripped out in the backend.
3162 */
3163 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
3164 unsigned stride = strmout->stride[i];
3165 struct ir3_instruction *base, *off;
3166
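/* buffer base address comes from the tfbo consts: */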
3167 base = create_uniform(ctx, regid(v->constbase.tfbo, i));
3168
3169 /* 24-bit should be enough: */
3170 off = ir3_MUL_U(ctx->block, vtxcnt, 0,
3171 create_immed(ctx->block, stride * 4), 0);
3172
3173 bases[i] = ir3_ADD_S(ctx->block, off, 0, base, 0);
3174 }
3175
3176 /* Generate the per-output store instructions: */
3177 for (unsigned i = 0; i < strmout->num_outputs; i++) {
3178 for (unsigned j = 0; j < strmout->output[i].num_components; j++) {
3179 unsigned c = j + strmout->output[i].start_component;
3180 struct ir3_instruction *base, *out, *stg;
3181
3182 base = bases[strmout->output[i].output_buffer];
3183 out = ctx->ir->outputs[regid(strmout->output[i].register_index, c)];
3184
3185 stg = ir3_STG(ctx->block, base, 0, out, 0,
3186 create_immed(ctx->block, 1), 0);
3187 stg->cat6.type = TYPE_U32;
3188 stg->cat6.dst_offset = (strmout->output[i].dst_offset + j) * 4;
3189
3190 array_insert(ctx->block, ctx->block->keeps, stg);
3191 }
3192 }
3193
3194 /* and finally switch to the new_end_block: */
3195 ctx->block = new_end_block;
3196 }
3197
3198 static void
3199 emit_function(struct ir3_context *ctx, nir_function_impl *impl)
3200 {
3201 nir_metadata_require(impl, nir_metadata_block_index);
3202
3203 emit_cf_list(ctx, &impl->body);
3204 emit_block(ctx, impl->end_block);
3205
3206 /* at this point, we should have a single empty block,
3207 * into which we emit the 'end' instruction.
3208 */
3209 compile_assert(ctx, list_empty(&ctx->block->instr_list));
3210
3211 /* If stream-out (aka transform-feedback) enabled, emit the
3212 * stream-out instructions, followed by a new empty block (into
3213 * which the 'end' instruction lands).
3214 *
3215 * NOTE: it is done in this order, rather than inserting before
3216 * we emit end_block, because NIR guarantees that all blocks
3217 * flow into end_block, and that end_block has no successors.
3218 * So by re-purposing end_block as the first block of stream-
3219 * out, we guarantee that all exit paths flow into the stream-
3220 * out instructions.
3221 */
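/* (a5xx and later have hw stream-out, hence the gpu_id check) */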
3222 if ((ctx->compiler->gpu_id < 500) &&
3223 (ctx->so->shader->stream_output.num_outputs > 0) &&
3224 !ctx->so->binning_pass) {
3225 debug_assert(ctx->so->type == MESA_SHADER_VERTEX);
3226 emit_stream_out(ctx);
3227 }
3228
3229 ir3_END(ctx->block);
3230 }
3231
3232 static struct ir3_instruction *
3233 create_frag_coord(struct ir3_context *ctx, unsigned comp)
3234 {
3235 struct ir3_block *block = ctx->block;
3236 struct ir3_instruction *instr;
3237
3238 if (!ctx->frag_coord) {
3239 ctx->frag_coord = create_input_compmask(ctx, 0, 0xf);
3240 /* defer add_sysval_input() until after all inputs created */
3241 }
3242
3243 split_dest(block, &instr, ctx->frag_coord, comp, 1);
3244
3245 switch (comp) {
3246 case 0: /* .x */
3247 case 1: /* .y */
3248 /* for frag_coord, we get unsigned values.. we need
3249 * to subtract (integer) 8 and divide by 16 (right-
3250 * shift by 4) then convert to float:
3251 *
3252 * sub.s tmp, src, 8
3253 * shr.b tmp, tmp, 4
3254 * mov.u32f32 dst, tmp
3255 *
3256 */
3257 instr = ir3_SUB_S(block, instr, 0,
3258 create_immed(block, 8), 0);
3259 instr = ir3_SHR_B(block, instr, 0,
3260 create_immed(block, 4), 0);
3261 instr = ir3_COV(block, instr, TYPE_U32, TYPE_F32);
3262
3263 return instr;
3264 case 2: /* .z */
3265 case 3: /* .w */
3266 default:
3267 /* seems that we can use these as-is: */
3268 return instr;
3269 }
3270 }
3271
3272 static void
3273 setup_input(struct ir3_context *ctx, nir_variable *in)
3274 {
3275 struct ir3_shader_variant *so = ctx->so;
3276 unsigned ncomp = glsl_get_components(in->type);
3277 unsigned n = in->data.driver_location;
3278 unsigned slot = in->data.location;
3279
3280 /* let's pretend things other than vec4 don't exist: */
3281 ncomp = MAX2(ncomp, 4);
3282
3283 /* skip unread inputs: we could end up with (for example) unsplit
3284 * matrix/etc inputs in the case they are not read, so just silently
3285 * skip these.
3286 */
3287 if (ncomp > 4)
3288 return;
3289
3290 compile_assert(ctx, ncomp == 4);
3291
3292 so->inputs[n].slot = slot;
3293 so->inputs[n].compmask = (1 << ncomp) - 1;
3294 so->inputs_count = MAX2(so->inputs_count, n + 1);
3295 so->inputs[n].interpolate = in->data.interpolation;
3296
3297 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
3298 for (int i = 0; i < ncomp; i++) {
3299 struct ir3_instruction *instr = NULL;
3300 unsigned idx = (n * 4) + i;
3301
3302 if (slot == VARYING_SLOT_POS) {
3303 so->inputs[n].bary = false;
3304 so->frag_coord = true;
3305 instr = create_frag_coord(ctx, i);
3306 } else if (slot == VARYING_SLOT_PNTC) {
3307 /* see for example st_nir_fixup_varying_slots().. this is
3308 * maybe a bit mesa/st specific. But we need things to line
3309 * up for this in fdN_program:
3310 * unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0);
3311 * if (emit->sprite_coord_enable & texmask) {
3312 * ...
3313 * }
3314 */
3315 so->inputs[n].slot = VARYING_SLOT_VAR8;
3316 so->inputs[n].bary = true;
3317 instr = create_frag_input(ctx, false);
3318 } else {
3319 bool use_ldlv = false;
3320
3321 /* detect the special case for front/back colors where
3322 * we need to do flat vs smooth shading depending on
3323 * rast state:
3324 */
3325 if (in->data.interpolation == INTERP_MODE_NONE) {
3326 switch (slot) {
3327 case VARYING_SLOT_COL0:
3328 case VARYING_SLOT_COL1:
3329 case VARYING_SLOT_BFC0:
3330 case VARYING_SLOT_BFC1:
3331 so->inputs[n].rasterflat = true;
3332 break;
3333 default:
3334 break;
3335 }
3336 }
3337
3338 if (ctx->compiler->flat_bypass) {
3339 if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) ||
3340 (so->inputs[n].rasterflat && ctx->so->key.rasterflat))
3341 use_ldlv = true;
3342 }
3343
3344 so->inputs[n].bary = true;
3345
3346 instr = create_frag_input(ctx, use_ldlv);
3347 }
3348
3349 compile_assert(ctx, idx < ctx->ir->ninputs);
3350
3351 ctx->ir->inputs[idx] = instr;
3352 }
3353 } else if (ctx->so->type == MESA_SHADER_VERTEX) {
3354 for (int i = 0; i < ncomp; i++) {
3355 unsigned idx = (n * 4) + i;
3356 compile_assert(ctx, idx < ctx->ir->ninputs);
3357 ctx->ir->inputs[idx] = create_input(ctx, idx);
3358 }
3359 } else {
3360 compile_error(ctx, "unknown shader type: %d\n", ctx->so->type);
3361 }
3362
3363 if (so->inputs[n].bary || (ctx->so->type == MESA_SHADER_VERTEX)) {
3364 so->total_in += ncomp;
3365 }
3366 }
3367
3368 static void
3369 setup_output(struct ir3_context *ctx, nir_variable *out)
3370 {
3371 struct ir3_shader_variant *so = ctx->so;
3372 unsigned ncomp = glsl_get_components(out->type);
3373 unsigned n = out->data.driver_location;
3374 unsigned slot = out->data.location;
3375 unsigned comp = 0;
3376
3377 /* let's pretend things other than vec4 don't exist: */
3378 ncomp = MAX2(ncomp, 4);
3379 compile_assert(ctx, ncomp == 4);
3380
3381 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
3382 switch (slot) {
3383 case FRAG_RESULT_DEPTH:
3384 comp = 2; /* tgsi will write to .z component */
3385 so->writes_pos = true;
3386 break;
3387 case FRAG_RESULT_COLOR:
3388 so->color0_mrt = 1;
3389 break;
3390 default:
3391 if (slot >= FRAG_RESULT_DATA0)
3392 break;
3393 compile_error(ctx, "unknown FS output name: %s\n",
3394 gl_frag_result_name(slot));
3395 }
3396 } else if (ctx->so->type == MESA_SHADER_VERTEX) {
3397 switch (slot) {
3398 case VARYING_SLOT_POS:
3399 so->writes_pos = true;
3400 break;
3401 case VARYING_SLOT_PSIZ:
3402 so->writes_psize = true;
3403 break;
3404 case VARYING_SLOT_COL0:
3405 case VARYING_SLOT_COL1:
3406 case VARYING_SLOT_BFC0:
3407 case VARYING_SLOT_BFC1:
3408 case VARYING_SLOT_FOGC:
3409 case VARYING_SLOT_CLIP_DIST0:
3410 case VARYING_SLOT_CLIP_DIST1:
3411 case VARYING_SLOT_CLIP_VERTEX:
3412 break;
3413 default:
3414 if (slot >= VARYING_SLOT_VAR0)
3415 break;
3416 if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
3417 break;
3418 compile_error(ctx, "unknown VS output name: %s\n",
3419 gl_varying_slot_name(slot));
3420 }
3421 } else {
3422 compile_error(ctx, "unknown shader type: %d\n", ctx->so->type);
3423 }
3424
3425 compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
3426
3427 so->outputs[n].slot = slot;
3428 so->outputs[n].regid = regid(n, comp);
3429 so->outputs_count = MAX2(so->outputs_count, n + 1);
3430
3431 for (int i = 0; i < ncomp; i++) {
3432 unsigned idx = (n * 4) + i;
3433 compile_assert(ctx, idx < ctx->ir->noutputs);
3434 ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
3435 }
3436 }
3437
3438 static int
3439 max_drvloc(struct exec_list *vars)
3440 {
3441 int drvloc = -1;
3442 nir_foreach_variable(var, vars) {
3443 drvloc = MAX2(drvloc, (int)var->data.driver_location);
3444 }
3445 return drvloc;
3446 }
3447
3448 static const unsigned max_sysvals[] = {
3449 [MESA_SHADER_FRAGMENT] = 24, // TODO
3450 [MESA_SHADER_VERTEX] = 16,
3451 [MESA_SHADER_COMPUTE] = 16, // TODO how many do we actually need?
3452 };
3453
3454 static void
3455 emit_instructions(struct ir3_context *ctx)
3456 {
3457 unsigned ninputs, noutputs;
3458 nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);
3459
3460 ninputs = (max_drvloc(&ctx->s->inputs) + 1) * 4;
3461 noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4;
3462
3463 /* we need to leave room for sysvals:
3464 */
3465 ninputs += max_sysvals[ctx->so->type];
3466
3467 ctx->ir = ir3_create(ctx->compiler, ninputs, noutputs);
3468
3469 /* Create inputs in first block: */
3470 ctx->block = get_block(ctx, nir_start_block(fxn));
3471 ctx->in_block = ctx->block;
3472 list_addtail(&ctx->block->node, &ctx->ir->block_list);
3473
3474 ninputs -= max_sysvals[ctx->so->type];
3475
3476 /* for fragment shader, the vcoord input register is used as the
3477 * base for bary.f varying fetch instrs:
3478 */
3479 struct ir3_instruction *vcoord = NULL;
3480 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
3481 struct ir3_instruction *xy[2];
3482
3483 vcoord = create_input_compmask(ctx, 0, 0x3);
3484 split_dest(ctx->block, xy, vcoord, 0, 2);
3485
3486 ctx->frag_vcoord = create_collect(ctx, xy, 2);
3487 }
3488
3489 /* Setup inputs: */
3490 nir_foreach_variable(var, &ctx->s->inputs) {
3491 setup_input(ctx, var);
3492 }
3493
3494 /* Defer add_sysval_input() stuff until after setup_inputs(),
3495 * because sysvals need to be appended after varyings:
3496 */
3497 if (vcoord) {
3498 add_sysval_input_compmask(ctx, SYSTEM_VALUE_VARYING_COORD,
3499 0x3, vcoord);
3500 }
3501
3502 if (ctx->frag_coord) {
3503 add_sysval_input_compmask(ctx, SYSTEM_VALUE_FRAG_COORD,
3504 0xf, ctx->frag_coord);
3505 }
3506
3507 /* Setup outputs: */
3508 nir_foreach_variable(var, &ctx->s->outputs) {
3509 setup_output(ctx, var);
3510 }
3511
3512 /* Setup registers (which should only be arrays): */
3513 nir_foreach_register(reg, &ctx->s->registers) {
3514 declare_array(ctx, reg);
3515 }
3516
3517 /* NOTE: need to do something more clever when we support >1 fxn */
3518 nir_foreach_register(reg, &fxn->registers) {
3519 declare_array(ctx, reg);
3520 }
3521 /* And emit the body: */
3522 ctx->impl = fxn;
3523 emit_function(ctx, fxn);
3524 }
3525
3526 /* from NIR perspective, we actually have varying inputs. But the varying
3527 * inputs, from an IR standpoint, are just bary.f/ldlv instructions. The
3528 * only actual inputs are the sysvals.
3529 */
3530 static void
3531 fixup_frag_inputs(struct ir3_context *ctx)
3532 {
3533 struct ir3_shader_variant *so = ctx->so;
3534 struct ir3 *ir = ctx->ir;
3535 unsigned i = 0;
3536
3537 /* sysvals should appear at the end of the inputs, drop everything else: */
3538 while ((i < so->inputs_count) && !so->inputs[i].sysval)
3539 i++;
3540
3541 /* at IR level, inputs are always blocks of 4 scalars: */
3542 i *= 4;
3543
3544 ir->inputs = &ir->inputs[i];
3545 ir->ninputs -= i;
3546 }
3547
3548 /* Fixup tex sampler state for astc/srgb workaround instructions. We
3549 * need to assign the tex state indexes for these after we know the
3550 * max tex index.
3551 */
3552 static void
3553 fixup_astc_srgb(struct ir3_context *ctx)
3554 {
3555 struct ir3_shader_variant *so = ctx->so;
3556 /* indexed by original tex idx, value is newly assigned alpha sampler
3557 * state tex idx. Zero is invalid since there is at least one sampler
3558 * if we get here.
3559 */
3560 unsigned alt_tex_state[16] = {0};
3561 unsigned tex_idx = ctx->max_texture_index + 1;
3562 unsigned idx = 0;
3563
3564 so->astc_srgb.base = tex_idx;
3565
3566 for (unsigned i = 0; i < ctx->ir->astc_srgb_count; i++) {
3567 struct ir3_instruction *sam = ctx->ir->astc_srgb[i];
3568
3569 compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));
3570
3571 if (alt_tex_state[sam->cat5.tex] == 0) {
3572 /* assign new alternate/alpha tex state slot: */
3573 alt_tex_state[sam->cat5.tex] = tex_idx++;
3574 so->astc_srgb.orig_idx[idx++] = sam->cat5.tex;
3575 so->astc_srgb.count++;
3576 }
3577
3578 sam->cat5.tex = alt_tex_state[sam->cat5.tex];
3579 }
3580 }
3581
3582 static void
3583 fixup_binning_pass(struct ir3_context *ctx)
3584 {
3585 struct ir3_shader_variant *so = ctx->so;
3586 struct ir3 *ir = ctx->ir;
3587 unsigned i, j;
3588
3589 for (i = 0, j = 0; i < so->outputs_count; i++) {
3590 unsigned slot = so->outputs[i].slot;
3591
3592 /* throw away everything but first position/psize */
3593 if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
3594 if (i != j) {
3595 so->outputs[j] = so->outputs[i];
3596 ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0];
3597 ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1];
3598 ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2];
3599 ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3];
3600 }
3601 j++;
3602 }
3603 }
3604 so->outputs_count = j;
3605 ir->noutputs = j * 4;
3606 }
3607
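/* Main NIR -> ir3 compile entry point: emit ir3 from the NIR shader, run
 * the backend passes (cp, group, depth, sched, ra, legalize), and fill in
 * the variant's input/output tables:
 */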
3608 int
3609 ir3_compile_shader_nir(struct ir3_compiler *compiler,
3610 struct ir3_shader_variant *so)
3611 {
3612 struct ir3_context *ctx;
3613 struct ir3 *ir;
3614 struct ir3_instruction **inputs;
3615 unsigned i, actual_in, inloc;
3616 int ret = 0, max_bary;
3617
3618 assert(!so->ir);
3619
3620 ctx = compile_init(compiler, so);
3621 if (!ctx) {
3622 DBG("INIT failed!");
3623 ret = -1;
3624 goto out;
3625 }
3626
3627 emit_instructions(ctx);
3628
3629 if (ctx->error) {
3630 DBG("EMIT failed!");
3631 ret = -1;
3632 goto out;
3633 }
3634
3635 ir = so->ir = ctx->ir;
3636
3637 /* keep a reference to the full inputs array (including varyings), before the frag-shader fixup below trims ir->inputs down to just the sysvals: */
3638 inputs = ir->inputs;
3639
3640 /* but fixup actual inputs for frag shader: */
3641 if (so->type == MESA_SHADER_FRAGMENT)
3642 fixup_frag_inputs(ctx);
3643
3644 /* at this point, for binning pass, throw away unneeded outputs: */
3645 if (so->binning_pass && (ctx->compiler->gpu_id < 600))
3646 fixup_binning_pass(ctx);
3647
3648 /* if we want half-precision outputs, mark the output registers
3649 * as half:
3650 */
3651 if (so->key.half_precision) {
3652 for (i = 0; i < ir->noutputs; i++) {
3653 struct ir3_instruction *out = ir->outputs[i];
3654
3655 if (!out)
3656 continue;
3657
3658 /* if frag shader writes z, that needs to be full precision: */
3659 if (so->outputs[i/4].slot == FRAG_RESULT_DEPTH)
3660 continue;
3661
3662 out->regs[0]->flags |= IR3_REG_HALF;
3663 /* output could be a fanout (ie. texture fetch output)
3664 * in which case we need to propagate the half-reg flag
3665 * up to the definer so that RA sees it:
3666 */
3667 if (out->opc == OPC_META_FO) {
3668 out = out->regs[1]->instr;
3669 out->regs[0]->flags |= IR3_REG_HALF;
3670 }
3671
3672 if (out->opc == OPC_MOV) {
3673 out->cat1.dst_type = half_type(out->cat1.dst_type);
3674 }
3675 }
3676 }
3677
3678 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3679 printf("BEFORE CP:\n");
3680 ir3_print(ir);
3681 }
3682
3683 ir3_cp(ir, so);
3684
3685 /* at this point, for binning pass, throw away unneeded outputs:
3686 * Note that for a6xx and later, we do this after ir3_cp to ensure
3687 * that the uniform/constant layout for BS and VS matches, so that
3688 * we can re-use the same VS_CONST state group.
3689 */
3690 if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
3691 fixup_binning_pass(ctx);
3692
3693 /* Insert a mov when the same instruction feeds more than one output, so
3694 * that each output slot has its own instruction (eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow)
3695 */
3696 for (int i = ir->noutputs - 1; i >= 0; i--) {
3697 if (!ir->outputs[i])
3698 continue;
3699 for (unsigned j = 0; j < i; j++) {
3700 if (ir->outputs[i] == ir->outputs[j]) {
3701 ir->outputs[i] =
3702 ir3_MOV(ir->outputs[i]->block, ir->outputs[i], TYPE_F32);
3703 }
3704 }
3705 }
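/* (Illustrative: if outputs[0] and outputs[1] both pointed at the same
 * instruction, outputs[1] now points at a "mov.f32f32" of it instead.)
 */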
3706
3707 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3708 printf("BEFORE GROUPING:\n");
3709 ir3_print(ir);
3710 }
3711
3712 ir3_sched_add_deps(ir);
3713
3714 /* Group left/right neighbors, inserting mov's where needed to
3715 * solve conflicts:
3716 */
3717 ir3_group(ir);
3718
3719 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3720 printf("AFTER GROUPING:\n");
3721 ir3_print(ir);
3722 }
3723
3724 ir3_depth(ir);
3725
3726 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3727 printf("AFTER DEPTH:\n");
3728 ir3_print(ir);
3729 }
3730
3731 ret = ir3_sched(ir);
3732 if (ret) {
3733 DBG("SCHED failed!");
3734 goto out;
3735 }
3736
3737 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3738 printf("AFTER SCHED:\n");
3739 ir3_print(ir);
3740 }
3741
3742 ret = ir3_ra(ir, so->type, so->frag_coord, so->frag_face);
3743 if (ret) {
3744 DBG("RA failed!");
3745 goto out;
3746 }
3747
3748 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3749 printf("AFTER RA:\n");
3750 ir3_print(ir);
3751 }
3752
3753 /* fixup input/outputs: */
3754 for (i = 0; i < so->outputs_count; i++) {
3755 so->outputs[i].regid = ir->outputs[i*4]->regs[0]->num;
3756 }
3757
3758 /* Note that some or all channels of an input may be unused: */
3759 actual_in = 0;
3760 inloc = 0;
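/* (regid(63,0), ie. r63.x, is used as the "not assigned" value for inputs
 * where none of the components end up being used.)
 */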
3761 for (i = 0; i < so->inputs_count; i++) {
3762 unsigned j, reg = regid(63,0), compmask = 0, maxcomp = 0;
3763 so->inputs[i].ncomp = 0;
3764 so->inputs[i].inloc = inloc;
3765 for (j = 0; j < 4; j++) {
3766 struct ir3_instruction *in = inputs[(i*4) + j];
3767 if (in && !(in->flags & IR3_INSTR_UNUSED)) {
3768 compmask |= (1 << j);
3769 reg = in->regs[0]->num - j;
3770 actual_in++;
3771 so->inputs[i].ncomp++;
3772 if ((so->type == MESA_SHADER_FRAGMENT) && so->inputs[i].bary) {
3773 /* assign inloc: */
3774 assert(in->regs[1]->flags & IR3_REG_IMMED);
3775 in->regs[1]->iim_val = inloc + j;
3776 maxcomp = j + 1;
3777 }
3778 }
3779 }
3780 if ((so->type == MESA_SHADER_FRAGMENT) && compmask && so->inputs[i].bary) {
3781 so->varying_in++;
3782 so->inputs[i].compmask = (1 << maxcomp) - 1;
3783 inloc += maxcomp;
3784 } else if (!so->inputs[i].sysval) {
3785 so->inputs[i].compmask = compmask;
3786 }
3787 so->inputs[i].regid = reg;
3788 }
3789
3790 if (ctx->astc_srgb)
3791 fixup_astc_srgb(ctx);
3792
3793 /* We need to run legalize after the "bary.f" offsets (inloc) have been
3794 * assigned (for the frag shader).
3795 */
3796 ir3_legalize(ir, &so->num_samp, &so->has_ssbo, &max_bary);
3797
3798 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3799 printf("AFTER LEGALIZE:\n");
3800 ir3_print(ir);
3801 }
3802
3803 /* Note that actual_in counts inputs that are not bary.f'd for FS: */
3804 if (so->type == MESA_SHADER_VERTEX)
3805 so->total_in = actual_in;
3806 else
3807 so->total_in = max_bary + 1;
3808
3809 out:
3810 if (ret) {
3811 if (so->ir)
3812 ir3_destroy(so->ir);
3813 so->ir = NULL;
3814 }
3815 compile_free(ctx);
3816
3817 return ret;
3818 }