nir: Make boolean conversions sized just like the others
[mesa.git] / src / freedreno / ir3 / ir3_compiler_nir.c
1 /*
2 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include <stdarg.h>
28
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
32
33 #include "ir3_compiler.h"
34 #include "ir3_shader.h"
35 #include "ir3_nir.h"
36
37 #include "instr-a3xx.h"
38 #include "ir3.h"
39
40 /* for conditionally setting boolean flag(s): */
41 #define COND(bool, val) ((bool) ? (val) : 0)
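/* (usage sketch, mirroring the array-access code further down:)
 *   ir3_reg_create(mov, 0, IR3_REG_ARRAY | COND(address, IR3_REG_RELATIV));
 * i.e. IR3_REG_RELATIV is only set when an address instruction is present.
 */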
42
43 #define DBG(fmt, ...) \
44 do { debug_printf("%s:%d: "fmt "\n", \
45 __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
46
47 struct ir3_context {
48 struct ir3_compiler *compiler;
49
50 struct nir_shader *s;
51
52 struct nir_instr *cur_instr; /* current instruction, just for debug */
53
54 struct ir3 *ir;
55 struct ir3_shader_variant *so;
56
57 struct ir3_block *block; /* the current block */
58 struct ir3_block *in_block; /* block created for shader inputs */
59
60 nir_function_impl *impl;
61
62 /* For fragment shaders, varyings are not actual shader inputs;
63 * instead the hw passes a varying-coord which is used with
64 * bary.f.
65 *
66 * But NIR doesn't know that, it still declares varyings as
67 * inputs. So we do all the input tracking normally and fix
68 * things up after compile_instructions()
69 *
70 * NOTE that frag_vcoord is the hardware position (possibly it
71 * is actually an index or tag or some such.. it is *not* a
72 * value that can be directly used for gl_FragCoord..)
73 */
74 struct ir3_instruction *frag_vcoord;
75
76 /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
77 struct ir3_instruction *frag_face, *frag_coord;
78
79 /* For vertex shaders, keep track of the system values sources */
80 struct ir3_instruction *vertex_id, *basevertex, *instance_id;
81
82 /* For fragment shaders: */
83 struct ir3_instruction *samp_id, *samp_mask_in;
84
85 /* Compute shader inputs: */
86 struct ir3_instruction *local_invocation_id, *work_group_id;
87
88 /* mapping from nir_register to defining instruction: */
89 struct hash_table *def_ht;
90
91 unsigned num_arrays;
92
93 /* a common pattern for indirect addressing is to request the
94 * same address register multiple times. To avoid generating
95 * duplicate instruction sequences (which our backend does not
96 * try to clean up, since that should be done at the NIR stage)
97 * we cache the address value generated for a given src value:
98 *
99 * Note that we have to cache these per alignment, since the same
100 * src used for an array of vec1 cannot also be used for an
101 * array of vec4.
102 */
103 struct hash_table *addr_ht[4];
104
105 /* last dst array, for indirect we need to insert a var-store.
106 */
107 struct ir3_instruction **last_dst;
108 unsigned last_dst_n;
109
110 /* maps nir_block to ir3_block, mostly for the purposes of
111 * figuring out the block's successors
112 */
113 struct hash_table *block_ht;
114
115 /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
116 unsigned astc_srgb;
117
118 unsigned samples; /* bitmask of x,y sample shifts */
119
120 unsigned max_texture_index;
121
122 /* set if we encounter something we can't handle yet, so we
123 * can bail cleanly and fall back to the TGSI compiler f/e
124 */
125 bool error;
126 };
127
128 /* gpu pointer size in units of 32bit registers/slots */
129 static unsigned pointer_size(struct ir3_context *ctx)
130 {
131 return (ctx->compiler->gpu_id >= 500) ? 2 : 1;
132 }
133
134 static struct ir3_instruction * create_immed(struct ir3_block *block, uint32_t val);
135 static struct ir3_block * get_block(struct ir3_context *ctx, const nir_block *nblock);
136
137
138 static struct ir3_context *
139 compile_init(struct ir3_compiler *compiler,
140 struct ir3_shader_variant *so)
141 {
142 struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);
143
144 if (compiler->gpu_id >= 400) {
145 if (so->type == MESA_SHADER_VERTEX) {
146 ctx->astc_srgb = so->key.vastc_srgb;
147 } else if (so->type == MESA_SHADER_FRAGMENT) {
148 ctx->astc_srgb = so->key.fastc_srgb;
149 }
150
151 } else {
152 if (so->type == MESA_SHADER_VERTEX) {
153 ctx->samples = so->key.vsamples;
154 } else if (so->type == MESA_SHADER_FRAGMENT) {
155 ctx->samples = so->key.fsamples;
156 }
157 }
158
159 ctx->compiler = compiler;
160 ctx->so = so;
161 ctx->def_ht = _mesa_hash_table_create(ctx,
162 _mesa_hash_pointer, _mesa_key_pointer_equal);
163 ctx->block_ht = _mesa_hash_table_create(ctx,
164 _mesa_hash_pointer, _mesa_key_pointer_equal);
165
166 /* TODO: maybe generate some sort of bitmask of what key
167 * lowers vs what shader has (ie. no need for texture clamp
168 * lowering if there are no texture sample instrs)..
169 * although should be done further up the stack to avoid
170 * creating duplicate variants..
171 */
172
173 if (ir3_key_lowers_nir(&so->key)) {
174 nir_shader *s = nir_shader_clone(ctx, so->shader->nir);
175 ctx->s = ir3_optimize_nir(so->shader, s, &so->key);
176 } else {
177 /* fast-path for shader key that lowers nothing in NIR: */
178 ctx->s = so->shader->nir;
179 }
180
181 /* this needs to be the last pass run, so do this here instead of
182 * in ir3_optimize_nir():
183 */
184 NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);
185 NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);
186
187 if (ir3_shader_debug & IR3_DBG_DISASM) {
188 printf("dump nir%dv%d: type=%d, k={cts=%u,hp=%u}\n",
189 so->shader->id, so->id, so->type,
190 so->key.color_two_side, so->key.half_precision);
191 nir_print_shader(ctx->s, stdout);
192 }
193
194 if (shader_debug_enabled(so->type)) {
195 fprintf(stderr, "NIR (final form) for %s shader:\n",
196 _mesa_shader_stage_to_string(so->type));
197 nir_print_shader(ctx->s, stderr);
198 }
199
200 ir3_nir_scan_driver_consts(ctx->s, &so->const_layout);
201
202 so->num_uniforms = ctx->s->num_uniforms;
203 so->num_ubos = ctx->s->info.num_ubos;
204
205 /* Layout of constant registers, each section aligned to vec4. Note
206 * that pointer size (ubo, etc) changes depending on generation.
207 *
208 * user consts
209 * UBO addresses
210 * SSBO sizes
211 * if (vertex shader) {
212 * driver params (IR3_DP_*)
213 * if (stream_output.num_outputs > 0)
214 * stream-out addresses
215 * }
216 * immediates
217 *
218 * Immediates go last mostly because they are inserted in the CP pass
219 * after the nir -> ir3 frontend.
220 */
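/* For example (cross-reference, not new behavior): emit_intrinsic_load_ubo()
 * below reads the address of UBO i starting at const register
 * regid(constbase.ubo, 0) + i * ptrsz, and create_driver_param() indexes
 * into the driver-param section the same way.
 */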
221 unsigned constoff = align(ctx->s->num_uniforms, 4);
222 unsigned ptrsz = pointer_size(ctx);
223
224 memset(&so->constbase, ~0, sizeof(so->constbase));
225
226 if (so->num_ubos > 0) {
227 so->constbase.ubo = constoff;
228 constoff += align(ctx->s->info.num_ubos * ptrsz, 4) / 4;
229 }
230
231 if (so->const_layout.ssbo_size.count > 0) {
232 unsigned cnt = so->const_layout.ssbo_size.count;
233 so->constbase.ssbo_sizes = constoff;
234 constoff += align(cnt, 4) / 4;
235 }
236
237 if (so->const_layout.image_dims.count > 0) {
238 unsigned cnt = so->const_layout.image_dims.count;
239 so->constbase.image_dims = constoff;
240 constoff += align(cnt, 4) / 4;
241 }
242
243 unsigned num_driver_params = 0;
244 if (so->type == MESA_SHADER_VERTEX) {
245 num_driver_params = IR3_DP_VS_COUNT;
246 } else if (so->type == MESA_SHADER_COMPUTE) {
247 num_driver_params = IR3_DP_CS_COUNT;
248 }
249
250 so->constbase.driver_param = constoff;
251 constoff += align(num_driver_params, 4) / 4;
252
253 if ((so->type == MESA_SHADER_VERTEX) &&
254 (compiler->gpu_id < 500) &&
255 so->shader->stream_output.num_outputs > 0) {
256 so->constbase.tfbo = constoff;
257 constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
258 }
259
260 so->constbase.immediate = constoff;
261
262 return ctx;
263 }
264
265 static void
266 compile_error(struct ir3_context *ctx, const char *format, ...)
267 {
268 struct hash_table *errors = NULL;
269 va_list ap;
270 va_start(ap, format);
271 if (ctx->cur_instr) {
272 errors = _mesa_hash_table_create(NULL,
273 _mesa_hash_pointer,
274 _mesa_key_pointer_equal);
275 char *msg = ralloc_vasprintf(errors, format, ap);
276 _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
277 } else {
278 _debug_vprintf(format, ap);
279 }
280 va_end(ap);
281 nir_print_shader_annotated(ctx->s, stdout, errors);
282 ralloc_free(errors);
283 ctx->error = true;
284 debug_assert(0);
285 }
286
287 #define compile_assert(ctx, cond) do { \
288 if (!(cond)) compile_error((ctx), "failed assert: "#cond"\n"); \
289 } while (0)
290
291 static void
292 compile_free(struct ir3_context *ctx)
293 {
294 ralloc_free(ctx);
295 }
296
297 static void
298 declare_array(struct ir3_context *ctx, nir_register *reg)
299 {
300 struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
301 arr->id = ++ctx->num_arrays;
302 /* NOTE: sometimes we get non-array regs, for example for arrays of
303 * length 1. See fs-const-array-of-struct-of-array.shader_test. So
304 * treat a non-array as if it were an array of length 1.
305 *
306 * It would be nice if there was a nir pass to convert arrays of
307 * length 1 to ssa.
308 */
309 arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
310 compile_assert(ctx, arr->length > 0);
311 arr->r = reg;
312 list_addtail(&arr->node, &ctx->ir->array_list);
313 }
314
315 static struct ir3_array *
316 get_array(struct ir3_context *ctx, nir_register *reg)
317 {
318 list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
319 if (arr->r == reg)
320 return arr;
321 }
322 compile_error(ctx, "bogus reg: %s\n", reg->name);
323 return NULL;
324 }
325
326 /* relative (indirect) if address!=NULL */
327 static struct ir3_instruction *
328 create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
329 struct ir3_instruction *address)
330 {
331 struct ir3_block *block = ctx->block;
332 struct ir3_instruction *mov;
333 struct ir3_register *src;
334
335 mov = ir3_instr_create(block, OPC_MOV);
336 mov->cat1.src_type = TYPE_U32;
337 mov->cat1.dst_type = TYPE_U32;
338 mov->barrier_class = IR3_BARRIER_ARRAY_R;
339 mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
340 ir3_reg_create(mov, 0, 0);
341 src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
342 COND(address, IR3_REG_RELATIV));
343 src->instr = arr->last_write;
344 src->size = arr->length;
345 src->array.id = arr->id;
346 src->array.offset = n;
347
348 if (address)
349 ir3_instr_set_address(mov, address);
350
351 return mov;
352 }
353
354 /* relative (indirect) if address!=NULL */
355 static void
356 create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
357 struct ir3_instruction *src, struct ir3_instruction *address)
358 {
359 struct ir3_block *block = ctx->block;
360 struct ir3_instruction *mov;
361 struct ir3_register *dst;
362
363 /* if not relative store, don't create an extra mov, since that
364 * ends up being difficult for cp to remove.
365 */
366 if (!address) {
367 dst = src->regs[0];
368
369 src->barrier_class |= IR3_BARRIER_ARRAY_W;
370 src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
371
372 dst->flags |= IR3_REG_ARRAY;
373 dst->instr = arr->last_write;
374 dst->size = arr->length;
375 dst->array.id = arr->id;
376 dst->array.offset = n;
377
378 arr->last_write = src;
379
380 array_insert(block, block->keeps, src);
381
382 return;
383 }
384
385 mov = ir3_instr_create(block, OPC_MOV);
386 mov->cat1.src_type = TYPE_U32;
387 mov->cat1.dst_type = TYPE_U32;
388 mov->barrier_class = IR3_BARRIER_ARRAY_W;
389 mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
390 dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
391 COND(address, IR3_REG_RELATIV));
392 dst->instr = arr->last_write;
393 dst->size = arr->length;
394 dst->array.id = arr->id;
395 dst->array.offset = n;
396 ir3_reg_create(mov, 0, IR3_REG_SSA)->instr = src;
397
398 if (address)
399 ir3_instr_set_address(mov, address);
400
401 arr->last_write = mov;
402
403 /* the array store may only matter to something in an earlier
404 * block (ie. loops), but since arrays are not in SSA, the depth
405 * pass won't know this.. so keep all array stores:
406 */
407 array_insert(block, block->keeps, mov);
408 }
409
410 static inline type_t utype_for_size(unsigned bit_size)
411 {
412 switch (bit_size) {
413 case 32: return TYPE_U32;
414 case 16: return TYPE_U16;
415 case 8: return TYPE_U8;
416 default: unreachable("bad bitsize"); return ~0;
417 }
418 }
419
420 static inline type_t utype_src(nir_src src)
421 { return utype_for_size(nir_src_bit_size(src)); }
422
423 static inline type_t utype_dst(nir_dest dst)
424 { return utype_for_size(nir_dest_bit_size(dst)); }
425
426 /* allocate an n element value array (to be populated by caller) and
427 * insert in def_ht
428 */
429 static struct ir3_instruction **
430 get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
431 {
432 struct ir3_instruction **value =
433 ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
434 _mesa_hash_table_insert(ctx->def_ht, dst, value);
435 return value;
436 }
437
438 static struct ir3_instruction **
439 get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
440 {
441 struct ir3_instruction **value;
442
443 if (dst->is_ssa) {
444 value = get_dst_ssa(ctx, &dst->ssa, n);
445 } else {
446 value = ralloc_array(ctx, struct ir3_instruction *, n);
447 }
448
449 /* NOTE: in the non-ssa case, we don't really need to store last_dst
450 * but this helps us catch cases where a put_dst() call is forgotten
451 */
452 compile_assert(ctx, !ctx->last_dst);
453 ctx->last_dst = value;
454 ctx->last_dst_n = n;
455
456 return value;
457 }
458
459 static struct ir3_instruction * get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align);
460
461 static struct ir3_instruction * const *
462 get_src(struct ir3_context *ctx, nir_src *src)
463 {
464 if (src->is_ssa) {
465 struct hash_entry *entry;
466 entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
467 compile_assert(ctx, entry);
468 return entry->data;
469 } else {
470 nir_register *reg = src->reg.reg;
471 struct ir3_array *arr = get_array(ctx, reg);
472 unsigned num_components = arr->r->num_components;
473 struct ir3_instruction *addr = NULL;
474 struct ir3_instruction **value =
475 ralloc_array(ctx, struct ir3_instruction *, num_components);
476
477 if (src->reg.indirect)
478 addr = get_addr(ctx, get_src(ctx, src->reg.indirect)[0],
479 reg->num_components);
480
481 for (unsigned i = 0; i < num_components; i++) {
482 unsigned n = src->reg.base_offset * reg->num_components + i;
483 compile_assert(ctx, n < arr->length);
484 value[i] = create_array_load(ctx, arr, n, addr);
485 }
486
487 return value;
488 }
489 }
490
491 static void
492 put_dst(struct ir3_context *ctx, nir_dest *dst)
493 {
494 unsigned bit_size = nir_dest_bit_size(*dst);
495
496 if (bit_size < 32) {
497 for (unsigned i = 0; i < ctx->last_dst_n; i++) {
498 struct ir3_instruction *dst = ctx->last_dst[i];
499 dst->regs[0]->flags |= IR3_REG_HALF;
500 if (ctx->last_dst[i]->opc == OPC_META_FO)
501 dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
502 }
503 }
504
505 if (!dst->is_ssa) {
506 nir_register *reg = dst->reg.reg;
507 struct ir3_array *arr = get_array(ctx, reg);
508 unsigned num_components = ctx->last_dst_n;
509 struct ir3_instruction *addr = NULL;
510
511 if (dst->reg.indirect)
512 addr = get_addr(ctx, get_src(ctx, dst->reg.indirect)[0],
513 reg->num_components);
514
515 for (unsigned i = 0; i < num_components; i++) {
516 unsigned n = dst->reg.base_offset * reg->num_components + i;
517 compile_assert(ctx, n < arr->length);
518 if (!ctx->last_dst[i])
519 continue;
520 create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
521 }
522
523 ralloc_free(ctx->last_dst);
524 }
525 ctx->last_dst = NULL;
526 ctx->last_dst_n = 0;
527 }
528
529 static struct ir3_instruction *
530 create_immed_typed(struct ir3_block *block, uint32_t val, type_t type)
531 {
532 struct ir3_instruction *mov;
533 unsigned flags = (type_size(type) < 32) ? IR3_REG_HALF : 0;
534
535 mov = ir3_instr_create(block, OPC_MOV);
536 mov->cat1.src_type = type;
537 mov->cat1.dst_type = type;
538 ir3_reg_create(mov, 0, flags);
539 ir3_reg_create(mov, 0, IR3_REG_IMMED)->uim_val = val;
540
541 return mov;
542 }
543
544 static struct ir3_instruction *
545 create_immed(struct ir3_block *block, uint32_t val)
546 {
547 return create_immed_typed(block, val, TYPE_U32);
548 }
549
550 static struct ir3_instruction *
551 create_addr(struct ir3_block *block, struct ir3_instruction *src, int align)
552 {
553 struct ir3_instruction *instr, *immed;
554
555 /* TODO in at least some cases, the backend could probably be
556 * made clever enough to propagate IR3_REG_HALF..
557 */
558 instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);
559 instr->regs[0]->flags |= IR3_REG_HALF;
560
561 switch (align) {
562 case 1:
563 /* src *= 1: */
564 break;
565 case 2:
566 /* src *= 2 => src <<= 1: */
567 immed = create_immed(block, 1);
568 immed->regs[0]->flags |= IR3_REG_HALF;
569
570 instr = ir3_SHL_B(block, instr, 0, immed, 0);
571 instr->regs[0]->flags |= IR3_REG_HALF;
572 instr->regs[1]->flags |= IR3_REG_HALF;
573 break;
574 case 3:
575 /* src *= 3: */
576 immed = create_immed(block, 3);
577 immed->regs[0]->flags |= IR3_REG_HALF;
578
579 instr = ir3_MULL_U(block, instr, 0, immed, 0);
580 instr->regs[0]->flags |= IR3_REG_HALF;
581 instr->regs[1]->flags |= IR3_REG_HALF;
582 break;
583 case 4:
584 /* src *= 4 => src <<= 2: */
585 immed = create_immed(block, 2);
586 immed->regs[0]->flags |= IR3_REG_HALF;
587
588 instr = ir3_SHL_B(block, instr, 0, immed, 0);
589 instr->regs[0]->flags |= IR3_REG_HALF;
590 instr->regs[1]->flags |= IR3_REG_HALF;
591 break;
592 default:
593 unreachable("bad align");
594 return NULL;
595 }
596
597 instr = ir3_MOV(block, instr, TYPE_S16);
598 instr->regs[0]->num = regid(REG_A0, 0);
599 instr->regs[0]->flags |= IR3_REG_HALF;
600 instr->regs[1]->flags |= IR3_REG_HALF;
601
602 return instr;
603 }
604
605 /* caches addr values to avoid generating multiple cov/shl/mova
606 * sequences for each use of a given NIR level src as address
607 */
608 static struct ir3_instruction *
609 get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align)
610 {
611 struct ir3_instruction *addr;
612 unsigned idx = align - 1;
613
614 compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr_ht));
615
616 if (!ctx->addr_ht[idx]) {
617 ctx->addr_ht[idx] = _mesa_hash_table_create(ctx,
618 _mesa_hash_pointer, _mesa_key_pointer_equal);
619 } else {
620 struct hash_entry *entry;
621 entry = _mesa_hash_table_search(ctx->addr_ht[idx], src);
622 if (entry)
623 return entry->data;
624 }
625
626 addr = create_addr(ctx->block, src, align);
627 _mesa_hash_table_insert(ctx->addr_ht[idx], src, addr);
628
629 return addr;
630 }
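/* e.g. if the same NIR src value is used twice as an indirect index with
 * align=4, the second get_addr() call returns the cached sequence from the
 * first; using that same value with align=1 still creates a new entry,
 * since create_addr() scales by a different amount.
 */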
631
632 static struct ir3_instruction *
633 get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
634 {
635 struct ir3_block *b = ctx->block;
636 struct ir3_instruction *cond;
637
638 /* NOTE: only cmps.*.* can write p0.x: */
639 cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
640 cond->cat2.condition = IR3_COND_NE;
641
642 /* condition always goes in predicate register: */
643 cond->regs[0]->num = regid(REG_P0, 0);
644
645 return cond;
646 }
647
648 static struct ir3_instruction *
649 create_uniform(struct ir3_context *ctx, unsigned n)
650 {
651 struct ir3_instruction *mov;
652
653 mov = ir3_instr_create(ctx->block, OPC_MOV);
654 /* TODO get types right? */
655 mov->cat1.src_type = TYPE_F32;
656 mov->cat1.dst_type = TYPE_F32;
657 ir3_reg_create(mov, 0, 0);
658 ir3_reg_create(mov, n, IR3_REG_CONST);
659
660 return mov;
661 }
662
663 static struct ir3_instruction *
664 create_uniform_indirect(struct ir3_context *ctx, int n,
665 struct ir3_instruction *address)
666 {
667 struct ir3_instruction *mov;
668
669 mov = ir3_instr_create(ctx->block, OPC_MOV);
670 mov->cat1.src_type = TYPE_U32;
671 mov->cat1.dst_type = TYPE_U32;
672 ir3_reg_create(mov, 0, 0);
673 ir3_reg_create(mov, 0, IR3_REG_CONST | IR3_REG_RELATIV)->array.offset = n;
674
675 ir3_instr_set_address(mov, address);
676
677 return mov;
678 }
679
680 static struct ir3_instruction *
681 create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
682 unsigned arrsz)
683 {
684 struct ir3_block *block = ctx->block;
685 struct ir3_instruction *collect;
686
687 if (arrsz == 0)
688 return NULL;
689
690 unsigned flags = arr[0]->regs[0]->flags & IR3_REG_HALF;
691
692 collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
693 ir3_reg_create(collect, 0, flags); /* dst */
694 for (unsigned i = 0; i < arrsz; i++) {
695 struct ir3_instruction *elem = arr[i];
696
697 /* Since arrays are pre-colored in RA, we can't assume that
698 * things will end up in the right place. (Ie. if a collect
699 * joins elements from two different arrays.) So insert an
700 * extra mov.
701 *
702 * We could possibly skip this if all the collected elements
703 * are contiguous elements in a single array.. not sure how
704 * likely that is to happen.
705 *
706 * Fixes a problem with glamor shaders, that in effect do
707 * something like:
708 *
709 * if (foo)
710 * texcoord = ..
711 * else
712 * texcoord = ..
713 * color = texture2D(tex, texcoord);
714 *
715 * In this case, texcoord will end up as nir registers (which
716 * translate to ir3 arrays of length 1), and we can't assume
717 * the two (or more) arrays will get allocated in consecutive
718 * scalar registers.
719 *
720 */
721 if (elem->regs[0]->flags & IR3_REG_ARRAY) {
722 type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
723 elem = ir3_MOV(block, elem, type);
724 }
725
726 compile_assert(ctx, (elem->regs[0]->flags & IR3_REG_HALF) == flags);
727 ir3_reg_create(collect, 0, IR3_REG_SSA | flags)->instr = elem;
728 }
729
730 return collect;
731 }
732
733 static struct ir3_instruction *
734 create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
735 struct ir3_instruction *address, struct ir3_instruction *collect)
736 {
737 struct ir3_block *block = ctx->block;
738 struct ir3_instruction *mov;
739 struct ir3_register *src;
740
741 mov = ir3_instr_create(block, OPC_MOV);
742 mov->cat1.src_type = TYPE_U32;
743 mov->cat1.dst_type = TYPE_U32;
744 ir3_reg_create(mov, 0, 0);
745 src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV);
746 src->instr = collect;
747 src->size = arrsz;
748 src->array.offset = n;
749
750 ir3_instr_set_address(mov, address);
751
752 return mov;
753 }
754
755 static struct ir3_instruction *
756 create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask)
757 {
758 struct ir3_instruction *in;
759
760 in = ir3_instr_create(ctx->in_block, OPC_META_INPUT);
761 in->inout.block = ctx->in_block;
762 ir3_reg_create(in, n, 0);
763
764 in->regs[0]->wrmask = compmask;
765
766 return in;
767 }
768
769 static struct ir3_instruction *
770 create_input(struct ir3_context *ctx, unsigned n)
771 {
772 return create_input_compmask(ctx, n, 0x1);
773 }
774
775 static struct ir3_instruction *
776 create_frag_input(struct ir3_context *ctx, bool use_ldlv)
777 {
778 struct ir3_block *block = ctx->block;
779 struct ir3_instruction *instr;
780 /* actual inloc is assigned and fixed up later: */
781 struct ir3_instruction *inloc = create_immed(block, 0);
782
783 if (use_ldlv) {
784 instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
785 instr->cat6.type = TYPE_U32;
786 instr->cat6.iim_val = 1;
787 } else {
788 instr = ir3_BARY_F(block, inloc, 0, ctx->frag_vcoord, 0);
789 instr->regs[2]->wrmask = 0x3;
790 }
791
792 return instr;
793 }
794
795 static struct ir3_instruction *
796 create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
797 {
798 /* first four vec4 sysval's reserved for UBOs: */
799 /* NOTE: dp is in scalar, but there can be >4 dp components: */
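/* e.g. a (hypothetical) dp value of 5 selects component 1 of the second
 * driver-param vec4, ie. regid(n + 1, 1):
 */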
800 unsigned n = ctx->so->constbase.driver_param;
801 unsigned r = regid(n + dp / 4, dp % 4);
802 return create_uniform(ctx, r);
803 }
804
805 /* helper for instructions that produce multiple consecutive scalar
806 * outputs which need to have a split/fanout meta instruction inserted
807 */
808 static void
809 split_dest(struct ir3_block *block, struct ir3_instruction **dst,
810 struct ir3_instruction *src, unsigned base, unsigned n)
811 {
812 struct ir3_instruction *prev = NULL;
813
814 if ((n == 1) && (src->regs[0]->wrmask == 0x1)) {
815 dst[0] = src;
816 return;
817 }
818
819 for (int i = 0, j = 0; i < n; i++) {
820 struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
821 ir3_reg_create(split, 0, IR3_REG_SSA);
822 ir3_reg_create(split, 0, IR3_REG_SSA)->instr = src;
823 split->fo.off = i + base;
824
825 if (prev) {
826 split->cp.left = prev;
827 split->cp.left_cnt++;
828 prev->cp.right = split;
829 prev->cp.right_cnt++;
830 }
831 prev = split;
832
833 if (src->regs[0]->wrmask & (1 << (i + base)))
834 dst[j++] = split;
835 }
836 }
837
838 /*
839 * Adreno uses uint rather than having a dedicated bool type,
840 * which (potentially) requires some conversion, in particular
841 * when using the output of a bool instr as an int input, or
842 * vice versa.
843 *
844 *         | Adreno |  NIR  |
845 *  -------+--------+-------+
846 *   true  |    1   |   ~0  |
847 *   false |    0   |    0  |
848 *
849 * To convert from an adreno bool (uint) to nir, use:
850 *
851 * absneg.s dst, (neg)src
852 *
853 * To convert back in the other direction:
854 *
855 * absneg.s dst, (abs)src
856 *
857 * The CP step can clean up the absneg.s that cancel each other
858 * out, and with a slight bit of extra cleverness (to recognize
859 * the instructions which produce either a 0 or 1) can eliminate
860 * the absneg.s's completely when an instruction that wants
861 * 0/1 consumes the result. For example, when a nir 'bcsel'
862 * consumes the result of 'feq'. So we should be able to get by
863 * without a boolean resolve step, and without incurring any
864 * extra penalty in instruction count.
865 */
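/* A sketch of the above, for a nir 'bcsel(feq(a, b), x, y)' as emitted by
 * emit_alu() below (register names made up):
 *
 *   cmps.f.eq  t0, a, b        ; produces 0/1
 *   absneg.s   t1, (neg)t0     ; n2b: 0/1 -> 0/~0
 *   absneg.s   t2, (abs)t1     ; b2n: 0/~0 -> 0/1
 *   sel.b32    dst, x, t2, y
 *
 * where the two absneg.s cancel and CP can drop them, since cmps.f.eq
 * already produces 0/1.
 */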
866
867 /* NIR bool -> native (adreno): */
868 static struct ir3_instruction *
869 ir3_b2n(struct ir3_block *block, struct ir3_instruction *instr)
870 {
871 return ir3_ABSNEG_S(block, instr, IR3_REG_SABS);
872 }
873
874 /* native (adreno) -> NIR bool: */
875 static struct ir3_instruction *
876 ir3_n2b(struct ir3_block *block, struct ir3_instruction *instr)
877 {
878 return ir3_ABSNEG_S(block, instr, IR3_REG_SNEG);
879 }
880
881 /*
882 * alu/sfu instructions:
883 */
884
885 static struct ir3_instruction *
886 create_cov(struct ir3_context *ctx, struct ir3_instruction *src,
887 unsigned src_bitsize, nir_op op)
888 {
889 type_t src_type, dst_type;
890
891 switch (op) {
892 case nir_op_f2f32:
893 case nir_op_f2f16_rtne:
894 case nir_op_f2f16_rtz:
895 case nir_op_f2f16:
896 case nir_op_f2i32:
897 case nir_op_f2i16:
898 case nir_op_f2i8:
899 case nir_op_f2u32:
900 case nir_op_f2u16:
901 case nir_op_f2u8:
902 switch (src_bitsize) {
903 case 32:
904 src_type = TYPE_F32;
905 break;
906 case 16:
907 src_type = TYPE_F16;
908 break;
909 default:
910 compile_error(ctx, "invalid src bit size: %u", src_bitsize);
911 }
912 break;
913
914 case nir_op_i2f32:
915 case nir_op_i2f16:
916 case nir_op_i2i32:
917 case nir_op_i2i16:
918 case nir_op_i2i8:
919 switch (src_bitsize) {
920 case 32:
921 src_type = TYPE_S32;
922 break;
923 case 16:
924 src_type = TYPE_S16;
925 break;
926 case 8:
927 src_type = TYPE_S8;
928 break;
929 default:
930 compile_error(ctx, "invalid src bit size: %u", src_bitsize);
931 }
932 break;
933
934 case nir_op_u2f32:
935 case nir_op_u2f16:
936 case nir_op_u2u32:
937 case nir_op_u2u16:
938 case nir_op_u2u8:
939 switch (src_bitsize) {
940 case 32:
941 src_type = TYPE_U32;
942 break;
943 case 16:
944 src_type = TYPE_U16;
945 break;
946 case 8:
947 src_type = TYPE_U8;
948 break;
949 default:
950 compile_error(ctx, "invalid src bit size: %u", src_bitsize);
951 }
952 break;
953
954 default:
955 compile_error(ctx, "invalid conversion op: %u", op);
956 }
957
958 switch (op) {
959 case nir_op_f2f32:
960 case nir_op_i2f32:
961 case nir_op_u2f32:
962 dst_type = TYPE_F32;
963 break;
964
965 case nir_op_f2f16_rtne:
966 case nir_op_f2f16_rtz:
967 case nir_op_f2f16:
968 /* TODO how to handle rounding mode? */
969 case nir_op_i2f16:
970 case nir_op_u2f16:
971 dst_type = TYPE_F16;
972 break;
973
974 case nir_op_f2i32:
975 case nir_op_i2i32:
976 dst_type = TYPE_S32;
977 break;
978
979 case nir_op_f2i16:
980 case nir_op_i2i16:
981 dst_type = TYPE_S16;
982 break;
983
984 case nir_op_f2i8:
985 case nir_op_i2i8:
986 dst_type = TYPE_S8;
987 break;
988
989 case nir_op_f2u32:
990 case nir_op_u2u32:
991 dst_type = TYPE_U32;
992 break;
993
994 case nir_op_f2u16:
995 case nir_op_u2u16:
996 dst_type = TYPE_U16;
997 break;
998
999 case nir_op_f2u8:
1000 case nir_op_u2u8:
1001 dst_type = TYPE_U8;
1002 break;
1003
1004 default:
1005 compile_error(ctx, "invalid conversion op: %u", op);
1006 }
1007
1008 return ir3_COV(ctx->block, src, src_type, dst_type);
1009 }
1010
1011 static void
1012 emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
1013 {
1014 const nir_op_info *info = &nir_op_infos[alu->op];
1015 struct ir3_instruction **dst, *src[info->num_inputs];
1016 unsigned bs[info->num_inputs]; /* bit size */
1017 struct ir3_block *b = ctx->block;
1018 unsigned dst_sz, wrmask;
1019
1020 if (alu->dest.dest.is_ssa) {
1021 dst_sz = alu->dest.dest.ssa.num_components;
1022 wrmask = (1 << dst_sz) - 1;
1023 } else {
1024 dst_sz = alu->dest.dest.reg.reg->num_components;
1025 wrmask = alu->dest.write_mask;
1026 }
1027
1028 dst = get_dst(ctx, &alu->dest.dest, dst_sz);
1029
1030 /* Vectors are special in that they have non-scalarized writemasks,
1031 * and just take the first swizzle channel of each argument,
1032 * in order, one per writemask channel.
1033 */
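/* e.g. a nir 'vec3 dst = vec3(a.y, b.x, c.z)' becomes three movs:
 *   dst.x = mov a.y;  dst.y = mov b.x;  dst.z = mov c.z
 */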
1034 if ((alu->op == nir_op_vec2) ||
1035 (alu->op == nir_op_vec3) ||
1036 (alu->op == nir_op_vec4)) {
1037
1038 for (int i = 0; i < info->num_inputs; i++) {
1039 nir_alu_src *asrc = &alu->src[i];
1040
1041 compile_assert(ctx, !asrc->abs);
1042 compile_assert(ctx, !asrc->negate);
1043
1044 src[i] = get_src(ctx, &asrc->src)[asrc->swizzle[0]];
1045 if (!src[i])
1046 src[i] = create_immed(ctx->block, 0);
1047 dst[i] = ir3_MOV(b, src[i], TYPE_U32);
1048 }
1049
1050 put_dst(ctx, &alu->dest.dest);
1051 return;
1052 }
1053
1054 /* We also get mov's with more than one component, so
1055 * handle those specially:
1056 */
1057 if ((alu->op == nir_op_imov) || (alu->op == nir_op_fmov)) {
1058 type_t type = (alu->op == nir_op_imov) ? TYPE_U32 : TYPE_F32;
1059 nir_alu_src *asrc = &alu->src[0];
1060 struct ir3_instruction *const *src0 = get_src(ctx, &asrc->src);
1061
1062 for (unsigned i = 0; i < dst_sz; i++) {
1063 if (wrmask & (1 << i)) {
1064 dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], type);
1065 } else {
1066 dst[i] = NULL;
1067 }
1068 }
1069
1070 put_dst(ctx, &alu->dest.dest);
1071 return;
1072 }
1073
1074 /* General case: We can just grab the one used channel per src. */
1075 for (int i = 0; i < info->num_inputs; i++) {
1076 unsigned chan = ffs(alu->dest.write_mask) - 1;
1077 nir_alu_src *asrc = &alu->src[i];
1078
1079 compile_assert(ctx, !asrc->abs);
1080 compile_assert(ctx, !asrc->negate);
1081
1082 src[i] = get_src(ctx, &asrc->src)[asrc->swizzle[chan]];
1083 bs[i] = nir_src_bit_size(asrc->src);
1084
1085 compile_assert(ctx, src[i]);
1086 }
1087
1088 switch (alu->op) {
1089 case nir_op_f2f32:
1090 case nir_op_f2f16_rtne:
1091 case nir_op_f2f16_rtz:
1092 case nir_op_f2f16:
1093 case nir_op_f2i32:
1094 case nir_op_f2i16:
1095 case nir_op_f2i8:
1096 case nir_op_f2u32:
1097 case nir_op_f2u16:
1098 case nir_op_f2u8:
1099 case nir_op_i2f32:
1100 case nir_op_i2f16:
1101 case nir_op_i2i32:
1102 case nir_op_i2i16:
1103 case nir_op_i2i8:
1104 case nir_op_u2f32:
1105 case nir_op_u2f16:
1106 case nir_op_u2u32:
1107 case nir_op_u2u16:
1108 case nir_op_u2u8:
1109 dst[0] = create_cov(ctx, src[0], bs[0], alu->op);
1110 break;
1111 case nir_op_f2b32:
1112 dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
1113 dst[0]->cat2.condition = IR3_COND_NE;
1114 dst[0] = ir3_n2b(b, dst[0]);
1115 break;
1116 case nir_op_b2f16:
1117 case nir_op_b2f32:
1118 dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
1119 break;
1120 case nir_op_b2i8:
1121 case nir_op_b2i16:
1122 case nir_op_b2i32:
1123 dst[0] = ir3_b2n(b, src[0]);
1124 break;
1125 case nir_op_i2b32:
1126 dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
1127 dst[0]->cat2.condition = IR3_COND_NE;
1128 dst[0] = ir3_n2b(b, dst[0]);
1129 break;
1130
1131 case nir_op_fneg:
1132 dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FNEG);
1133 break;
1134 case nir_op_fabs:
1135 dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FABS);
1136 break;
1137 case nir_op_fmax:
1138 dst[0] = ir3_MAX_F(b, src[0], 0, src[1], 0);
1139 break;
1140 case nir_op_fmin:
1141 dst[0] = ir3_MIN_F(b, src[0], 0, src[1], 0);
1142 break;
1143 case nir_op_fsat:
1144 /* if there is just a single use of the src, and it supports the
1145 * (sat) bit, we can just fold the (sat) flag back to the
1146 * src instruction and create a mov. This is easier for cp
1147 * to eliminate.
1148 *
1149 * TODO probably opc_cat==4 is ok too
1150 */
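/* e.g. a single-use 'fsat(fadd(a, b))' becomes a (sat)-flagged add.f
 * plus a plain mov, rather than an extra saturating max.f:
 */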
1151 if (alu->src[0].src.is_ssa &&
1152 (list_length(&alu->src[0].src.ssa->uses) == 1) &&
1153 ((opc_cat(src[0]->opc) == 2) || (opc_cat(src[0]->opc) == 3))) {
1154 src[0]->flags |= IR3_INSTR_SAT;
1155 dst[0] = ir3_MOV(b, src[0], TYPE_U32);
1156 } else {
1157 /* otherwise generate a max.f that saturates.. blob does
1158 * similar (generating a cat2 mov using max.f)
1159 */
1160 dst[0] = ir3_MAX_F(b, src[0], 0, src[0], 0);
1161 dst[0]->flags |= IR3_INSTR_SAT;
1162 }
1163 break;
1164 case nir_op_fmul:
1165 dst[0] = ir3_MUL_F(b, src[0], 0, src[1], 0);
1166 break;
1167 case nir_op_fadd:
1168 dst[0] = ir3_ADD_F(b, src[0], 0, src[1], 0);
1169 break;
1170 case nir_op_fsub:
1171 dst[0] = ir3_ADD_F(b, src[0], 0, src[1], IR3_REG_FNEG);
1172 break;
1173 case nir_op_ffma:
1174 dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0);
1175 break;
1176 case nir_op_fddx:
1177 dst[0] = ir3_DSX(b, src[0], 0);
1178 dst[0]->cat5.type = TYPE_F32;
1179 break;
1180 case nir_op_fddy:
1181 dst[0] = ir3_DSY(b, src[0], 0);
1182 dst[0]->cat5.type = TYPE_F32;
1183 break;
1185 case nir_op_flt:
1186 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
1187 dst[0]->cat2.condition = IR3_COND_LT;
1188 dst[0] = ir3_n2b(b, dst[0]);
1189 break;
1190 case nir_op_fge:
1191 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
1192 dst[0]->cat2.condition = IR3_COND_GE;
1193 dst[0] = ir3_n2b(b, dst[0]);
1194 break;
1195 case nir_op_feq:
1196 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
1197 dst[0]->cat2.condition = IR3_COND_EQ;
1198 dst[0] = ir3_n2b(b, dst[0]);
1199 break;
1200 case nir_op_fne:
1201 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
1202 dst[0]->cat2.condition = IR3_COND_NE;
1203 dst[0] = ir3_n2b(b, dst[0]);
1204 break;
1205 case nir_op_fceil:
1206 dst[0] = ir3_CEIL_F(b, src[0], 0);
1207 break;
1208 case nir_op_ffloor:
1209 dst[0] = ir3_FLOOR_F(b, src[0], 0);
1210 break;
1211 case nir_op_ftrunc:
1212 dst[0] = ir3_TRUNC_F(b, src[0], 0);
1213 break;
1214 case nir_op_fround_even:
1215 dst[0] = ir3_RNDNE_F(b, src[0], 0);
1216 break;
1217 case nir_op_fsign:
1218 dst[0] = ir3_SIGN_F(b, src[0], 0);
1219 break;
1220
1221 case nir_op_fsin:
1222 dst[0] = ir3_SIN(b, src[0], 0);
1223 break;
1224 case nir_op_fcos:
1225 dst[0] = ir3_COS(b, src[0], 0);
1226 break;
1227 case nir_op_frsq:
1228 dst[0] = ir3_RSQ(b, src[0], 0);
1229 break;
1230 case nir_op_frcp:
1231 dst[0] = ir3_RCP(b, src[0], 0);
1232 break;
1233 case nir_op_flog2:
1234 dst[0] = ir3_LOG2(b, src[0], 0);
1235 break;
1236 case nir_op_fexp2:
1237 dst[0] = ir3_EXP2(b, src[0], 0);
1238 break;
1239 case nir_op_fsqrt:
1240 dst[0] = ir3_SQRT(b, src[0], 0);
1241 break;
1242
1243 case nir_op_iabs:
1244 dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SABS);
1245 break;
1246 case nir_op_iadd:
1247 dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
1248 break;
1249 case nir_op_iand:
1250 dst[0] = ir3_AND_B(b, src[0], 0, src[1], 0);
1251 break;
1252 case nir_op_imax:
1253 dst[0] = ir3_MAX_S(b, src[0], 0, src[1], 0);
1254 break;
1255 case nir_op_umax:
1256 dst[0] = ir3_MAX_U(b, src[0], 0, src[1], 0);
1257 break;
1258 case nir_op_imin:
1259 dst[0] = ir3_MIN_S(b, src[0], 0, src[1], 0);
1260 break;
1261 case nir_op_umin:
1262 dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
1263 break;
1264 case nir_op_imul:
1265 /*
1266 * dst = (al * bl) + (ah * bl << 16) + (al * bh << 16)
1267 * mull.u tmp0, a, b ; mul low, i.e. al * bl
1268 * madsh.m16 tmp1, a, b, tmp0 ; mul-add shift high mix, i.e. ah * bl << 16
1269 * madsh.m16 dst, b, a, tmp1 ; i.e. al * bh << 16
1270 */
1271 dst[0] = ir3_MADSH_M16(b, src[1], 0, src[0], 0,
1272 ir3_MADSH_M16(b, src[0], 0, src[1], 0,
1273 ir3_MULL_U(b, src[0], 0, src[1], 0), 0), 0);
1274 break;
1275 case nir_op_ineg:
1276 dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
1277 break;
1278 case nir_op_inot:
1279 dst[0] = ir3_NOT_B(b, src[0], 0);
1280 break;
1281 case nir_op_ior:
1282 dst[0] = ir3_OR_B(b, src[0], 0, src[1], 0);
1283 break;
1284 case nir_op_ishl:
1285 dst[0] = ir3_SHL_B(b, src[0], 0, src[1], 0);
1286 break;
1287 case nir_op_ishr:
1288 dst[0] = ir3_ASHR_B(b, src[0], 0, src[1], 0);
1289 break;
1290 case nir_op_isign: {
1291 /* maybe this would be sane to lower in nir.. */
1292 struct ir3_instruction *neg, *pos;
1293
1294 neg = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
1295 neg->cat2.condition = IR3_COND_LT;
1296
1297 pos = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
1298 pos->cat2.condition = IR3_COND_GT;
1299
1300 dst[0] = ir3_SUB_U(b, pos, 0, neg, 0);
1301
1302 break;
1303 }
1304 case nir_op_isub:
1305 dst[0] = ir3_SUB_U(b, src[0], 0, src[1], 0);
1306 break;
1307 case nir_op_ixor:
1308 dst[0] = ir3_XOR_B(b, src[0], 0, src[1], 0);
1309 break;
1310 case nir_op_ushr:
1311 dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0);
1312 break;
1313 case nir_op_ilt:
1314 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
1315 dst[0]->cat2.condition = IR3_COND_LT;
1316 dst[0] = ir3_n2b(b, dst[0]);
1317 break;
1318 case nir_op_ige:
1319 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
1320 dst[0]->cat2.condition = IR3_COND_GE;
1321 dst[0] = ir3_n2b(b, dst[0]);
1322 break;
1323 case nir_op_ieq:
1324 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
1325 dst[0]->cat2.condition = IR3_COND_EQ;
1326 dst[0] = ir3_n2b(b, dst[0]);
1327 break;
1328 case nir_op_ine:
1329 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
1330 dst[0]->cat2.condition = IR3_COND_NE;
1331 dst[0] = ir3_n2b(b, dst[0]);
1332 break;
1333 case nir_op_ult:
1334 dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
1335 dst[0]->cat2.condition = IR3_COND_LT;
1336 dst[0] = ir3_n2b(b, dst[0]);
1337 break;
1338 case nir_op_uge:
1339 dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
1340 dst[0]->cat2.condition = IR3_COND_GE;
1341 dst[0] = ir3_n2b(b, dst[0]);
1342 break;
1343
1344 case nir_op_bcsel: {
1345 struct ir3_instruction *cond = ir3_b2n(b, src[0]);
1346 compile_assert(ctx, bs[1] == bs[2]);
1347 /* the boolean condition is 32b even if src[1] and src[2] are
1348 * half-precision, but sel.b16 wants all three src's to be the
1349 * same type.
1350 */
1351 if (bs[1] < 32)
1352 cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16);
1353 dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
1354 break;
1355 }
1356 case nir_op_bit_count:
1357 dst[0] = ir3_CBITS_B(b, src[0], 0);
1358 break;
1359 case nir_op_ifind_msb: {
1360 struct ir3_instruction *cmp;
1361 dst[0] = ir3_CLZ_S(b, src[0], 0);
1362 cmp = ir3_CMPS_S(b, dst[0], 0, create_immed(b, 0), 0);
1363 cmp->cat2.condition = IR3_COND_GE;
1364 dst[0] = ir3_SEL_B32(b,
1365 ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
1366 cmp, 0, dst[0], 0);
1367 break;
1368 }
1369 case nir_op_ufind_msb:
1370 dst[0] = ir3_CLZ_B(b, src[0], 0);
1371 dst[0] = ir3_SEL_B32(b,
1372 ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
1373 src[0], 0, dst[0], 0);
1374 break;
1375 case nir_op_find_lsb:
1376 dst[0] = ir3_BFREV_B(b, src[0], 0);
1377 dst[0] = ir3_CLZ_B(b, dst[0], 0);
1378 break;
1379 case nir_op_bitfield_reverse:
1380 dst[0] = ir3_BFREV_B(b, src[0], 0);
1381 break;
1382
1383 default:
1384 compile_error(ctx, "Unhandled ALU op: %s\n",
1385 nir_op_infos[alu->op].name);
1386 break;
1387 }
1388
1389 put_dst(ctx, &alu->dest.dest);
1390 }
1391
1392 /* handles direct/indirect UBO reads: */
1393 static void
1394 emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1395 struct ir3_instruction **dst)
1396 {
1397 struct ir3_block *b = ctx->block;
1398 struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
1399 nir_const_value *const_offset;
1400 /* UBO addresses are the first driver params: */
1401 unsigned ubo = regid(ctx->so->constbase.ubo, 0);
1402 const unsigned ptrsz = pointer_size(ctx);
1403
1404 int off = 0;
1405
1406 /* First src is ubo index, which could either be an immed or not: */
1407 src0 = get_src(ctx, &intr->src[0])[0];
1408 if (is_same_type_mov(src0) &&
1409 (src0->regs[1]->flags & IR3_REG_IMMED)) {
1410 base_lo = create_uniform(ctx, ubo + (src0->regs[1]->iim_val * ptrsz));
1411 base_hi = create_uniform(ctx, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
1412 } else {
1413 base_lo = create_uniform_indirect(ctx, ubo, get_addr(ctx, src0, 4));
1414 base_hi = create_uniform_indirect(ctx, ubo + 1, get_addr(ctx, src0, 4));
1415 }
1416
1417 /* note: on 32bit gpus, base_hi is ignored and DCE'd */
1418 addr = base_lo;
1419
1420 const_offset = nir_src_as_const_value(intr->src[1]);
1421 if (const_offset) {
1422 off += const_offset->u32[0];
1423 } else {
1424 /* For load_ubo_indirect, second src is indirect offset: */
1425 src1 = get_src(ctx, &intr->src[1])[0];
1426
1427 /* and add offset to addr: */
1428 addr = ir3_ADD_S(b, addr, 0, src1, 0);
1429 }
1430
1431 /* if offset is too large to encode in the ldg, split it out: */
1432 if ((off + (intr->num_components * 4)) > 1024) {
1433 /* split out the minimal amount to improve the odds that
1434 * cp can fit the immediate in the add.s instruction:
1435 */
1436 unsigned off2 = off + (intr->num_components * 4) - 1024;
1437 addr = ir3_ADD_S(b, addr, 0, create_immed(b, off2), 0);
1438 off -= off2;
1439 }
1440
1441 if (ptrsz == 2) {
1442 struct ir3_instruction *carry;
1443
1444 /* handle 32b rollover, ie:
1445 * if (addr < base_lo)
1446 * base_hi++
1447 */
1448 carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
1449 carry->cat2.condition = IR3_COND_LT;
1450 base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);
1451
1452 addr = create_collect(ctx, (struct ir3_instruction*[]){ addr, base_hi }, 2);
1453 }
1454
1455 for (int i = 0; i < intr->num_components; i++) {
1456 struct ir3_instruction *load =
1457 ir3_LDG(b, addr, 0, create_immed(b, 1), 0);
1458 load->cat6.type = TYPE_U32;
1459 load->cat6.src_offset = off + i * 4; /* byte offset */
1460 dst[i] = load;
1461 }
1462 }
1463
1464 /* src[] = { buffer_index, offset }. No const_index */
1465 static void
1466 emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1467 struct ir3_instruction **dst)
1468 {
1469 struct ir3_block *b = ctx->block;
1470 struct ir3_instruction *ldgb, *src0, *src1, *offset;
1471 nir_const_value *const_offset;
1472
1473 /* can this be non-const buffer_index? how do we handle that? */
1474 const_offset = nir_src_as_const_value(intr->src[0]);
1475 compile_assert(ctx, const_offset);
1476
1477 offset = get_src(ctx, &intr->src[1])[0];
1478
1479 /* src0 is uvec2(offset*4, 0), src1 is offset.. nir already *= 4: */
1480 src0 = create_collect(ctx, (struct ir3_instruction*[]){
1481 offset,
1482 create_immed(b, 0),
1483 }, 2);
1484 src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
1485
1486 ldgb = ir3_LDGB(b, create_immed(b, const_offset->u32[0]), 0,
1487 src0, 0, src1, 0);
1488 ldgb->regs[0]->wrmask = MASK(intr->num_components);
1489 ldgb->cat6.iim_val = intr->num_components;
1490 ldgb->cat6.d = 4;
1491 ldgb->cat6.type = TYPE_U32;
1492 ldgb->barrier_class = IR3_BARRIER_BUFFER_R;
1493 ldgb->barrier_conflict = IR3_BARRIER_BUFFER_W;
1494
1495 split_dest(b, dst, ldgb, 0, intr->num_components);
1496 }
1497
1498 /* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
1499 static void
1500 emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1501 {
1502 struct ir3_block *b = ctx->block;
1503 struct ir3_instruction *stgb, *src0, *src1, *src2, *offset;
1504 nir_const_value *const_offset;
1505 /* TODO handle wrmask properly, see _store_shared().. but I think
1506 * it is more a PITA than that, since blob ends up loading the
1507 * masked components and writing them back out.
1508 */
1509 unsigned wrmask = intr->const_index[0];
1510 unsigned ncomp = ffs(~wrmask) - 1;
1511
1512 /* can this be non-const buffer_index? how do we handle that? */
1513 const_offset = nir_src_as_const_value(intr->src[1]);
1514 compile_assert(ctx, const_offset);
1515
1516 offset = get_src(ctx, &intr->src[2])[0];
1517
1518 /* src0 is value, src1 is offset, src2 is uvec2(offset*4, 0)..
1519 * nir already *= 4:
1520 */
1521 src0 = create_collect(ctx, get_src(ctx, &intr->src[0]), ncomp);
1522 src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
1523 src2 = create_collect(ctx, (struct ir3_instruction*[]){
1524 offset,
1525 create_immed(b, 0),
1526 }, 2);
1527
1528 stgb = ir3_STGB(b, create_immed(b, const_offset->u32[0]), 0,
1529 src0, 0, src1, 0, src2, 0);
1530 stgb->cat6.iim_val = ncomp;
1531 stgb->cat6.d = 4;
1532 stgb->cat6.type = TYPE_U32;
1533 stgb->barrier_class = IR3_BARRIER_BUFFER_W;
1534 stgb->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
1535
1536 array_insert(b, b->keeps, stgb);
1537 }
1538
1539 /* src[] = { block_index } */
1540 static void
1541 emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1542 struct ir3_instruction **dst)
1543 {
1544 /* SSBO size stored as a const starting at ssbo_sizes: */
1545 unsigned blk_idx = nir_src_as_const_value(intr->src[0])->u32[0];
1546 unsigned idx = regid(ctx->so->constbase.ssbo_sizes, 0) +
1547 ctx->so->const_layout.ssbo_size.off[blk_idx];
1548
1549 debug_assert(ctx->so->const_layout.ssbo_size.mask & (1 << blk_idx));
1550
1551 dst[0] = create_uniform(ctx, idx);
1552 }
1553
1554 /*
1555 * SSBO atomic intrinsics
1556 *
1557 * All of the SSBO atomic memory operations read a value from memory,
1558 * compute a new value using one of the operations below, write the new
1559 * value to memory, and return the original value read.
1560 *
1561 * All operations take 3 sources except CompSwap that takes 4. These
1562 * sources represent:
1563 *
1564 * 0: The SSBO buffer index.
1565 * 1: The offset into the SSBO buffer of the variable that the atomic
1566 * operation will operate on.
1567 * 2: The data parameter to the atomic function (i.e. the value to add
1568 * in ssbo_atomic_add, etc).
1569 * 3: For CompSwap only: the second data parameter.
1570 */
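/* For example (illustrative GLSL, not from a real trace):
 * 'uint old = atomicAdd(buf.x, 1u);' reaches us as
 * nir_intrinsic_ssbo_atomic_add with src[0] = SSBO index, src[1] = byte
 * offset of buf.x and src[2] = 1u, and is emitted as ir3_ATOMIC_ADD_G
 * below, returning the pre-add value.
 */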
1571 static struct ir3_instruction *
1572 emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1573 {
1574 struct ir3_block *b = ctx->block;
1575 struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *offset;
1576 nir_const_value *const_offset;
1577 type_t type = TYPE_U32;
1578
1579 /* can this be non-const buffer_index? how do we handle that? */
1580 const_offset = nir_src_as_const_value(intr->src[0]);
1581 compile_assert(ctx, const_offset);
1582 ssbo = create_immed(b, const_offset->u32[0]);
1583
1584 offset = get_src(ctx, &intr->src[1])[0];
1585
1586 /* src0 is data (or uvec2(data, compare))
1587 * src1 is offset
1588 * src2 is uvec2(offset*4, 0) (appears to be 64b byte offset)
1589 *
1590 * Note that nir already multiplies the offset by four
1591 */
1592 src0 = get_src(ctx, &intr->src[2])[0];
1593 src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
1594 src2 = create_collect(ctx, (struct ir3_instruction*[]){
1595 offset,
1596 create_immed(b, 0),
1597 }, 2);
1598
1599 switch (intr->intrinsic) {
1600 case nir_intrinsic_ssbo_atomic_add:
1601 atomic = ir3_ATOMIC_ADD_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1602 break;
1603 case nir_intrinsic_ssbo_atomic_imin:
1604 atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1605 type = TYPE_S32;
1606 break;
1607 case nir_intrinsic_ssbo_atomic_umin:
1608 atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1609 break;
1610 case nir_intrinsic_ssbo_atomic_imax:
1611 atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1612 type = TYPE_S32;
1613 break;
1614 case nir_intrinsic_ssbo_atomic_umax:
1615 atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1616 break;
1617 case nir_intrinsic_ssbo_atomic_and:
1618 atomic = ir3_ATOMIC_AND_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1619 break;
1620 case nir_intrinsic_ssbo_atomic_or:
1621 atomic = ir3_ATOMIC_OR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1622 break;
1623 case nir_intrinsic_ssbo_atomic_xor:
1624 atomic = ir3_ATOMIC_XOR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1625 break;
1626 case nir_intrinsic_ssbo_atomic_exchange:
1627 atomic = ir3_ATOMIC_XCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1628 break;
1629 case nir_intrinsic_ssbo_atomic_comp_swap:
1630 /* for cmpxchg, src0 is [ui]vec2(data, compare): */
1631 src0 = create_collect(ctx, (struct ir3_instruction*[]){
1632 get_src(ctx, &intr->src[3])[0],
1633 src0,
1634 }, 2);
1635 atomic = ir3_ATOMIC_CMPXCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
1636 break;
1637 default:
1638 unreachable("boo");
1639 }
1640
1641 atomic->cat6.iim_val = 1;
1642 atomic->cat6.d = 4;
1643 atomic->cat6.type = type;
1644 atomic->barrier_class = IR3_BARRIER_BUFFER_W;
1645 atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
1646
1647 /* even if nothing consumes the result, we can't DCE the instruction: */
1648 array_insert(b, b->keeps, atomic);
1649
1650 return atomic;
1651 }
1652
1653 /* src[] = { offset }. const_index[] = { base } */
1654 static void
1655 emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1656 struct ir3_instruction **dst)
1657 {
1658 struct ir3_block *b = ctx->block;
1659 struct ir3_instruction *ldl, *offset;
1660 unsigned base;
1661
1662 offset = get_src(ctx, &intr->src[0])[0];
1663 base = nir_intrinsic_base(intr);
1664
1665 ldl = ir3_LDL(b, offset, 0, create_immed(b, intr->num_components), 0);
1666 ldl->cat6.src_offset = base;
1667 ldl->cat6.type = utype_dst(intr->dest);
1668 ldl->regs[0]->wrmask = MASK(intr->num_components);
1669
1670 ldl->barrier_class = IR3_BARRIER_SHARED_R;
1671 ldl->barrier_conflict = IR3_BARRIER_SHARED_W;
1672
1673 split_dest(b, dst, ldl, 0, intr->num_components);
1674 }
1675
1676 /* src[] = { value, offset }. const_index[] = { base, write_mask } */
1677 static void
1678 emit_intrinsic_store_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1679 {
1680 struct ir3_block *b = ctx->block;
1681 struct ir3_instruction *stl, *offset;
1682 struct ir3_instruction * const *value;
1683 unsigned base, wrmask;
1684
1685 value = get_src(ctx, &intr->src[0]);
1686 offset = get_src(ctx, &intr->src[1])[0];
1687
1688 base = nir_intrinsic_base(intr);
1689 wrmask = nir_intrinsic_write_mask(intr);
1690
1691 /* Combine groups of consecutive enabled channels in one write
1692 * message. We use ffs to find the first enabled channel and then ffs on
1693 * the bit-inverse, down-shifted writemask to determine the length of
1694 * the block of enabled bits.
1695 *
1696 * (trick stolen from i965's fs_visitor::nir_emit_cs_intrinsic())
1697 */
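/* Worked example (hypothetical writemask): wrmask = 0b1011 gives
 *   pass 1: first_component = 0, length = 2 -> stl of value[0..1] at base+0
 *   pass 2: first_component = 3, length = 1 -> stl of value[3]    at base+3
 * after which wrmask is zero and the loop stops.
 */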
1698 while (wrmask) {
1699 unsigned first_component = ffs(wrmask) - 1;
1700 unsigned length = ffs(~(wrmask >> first_component)) - 1;
1701
1702 stl = ir3_STL(b, offset, 0,
1703 create_collect(ctx, &value[first_component], length), 0,
1704 create_immed(b, length), 0);
1705 stl->cat6.dst_offset = first_component + base;
1706 stl->cat6.type = utype_src(intr->src[0]);
1707 stl->barrier_class = IR3_BARRIER_SHARED_W;
1708 stl->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1709
1710 array_insert(b, b->keeps, stl);
1711
1712 /* Clear the bits in the writemask that we just wrote, then try
1713 * again to see if more channels are left.
1714 */
1715 wrmask &= (15 << (first_component + length));
1716 }
1717 }
1718
1719 /*
1720 * CS shared variable atomic intrinsics
1721 *
1722 * All of the shared variable atomic memory operations read a value from
1723 * memory, compute a new value using one of the operations below, write the
1724 * new value to memory, and return the original value read.
1725 *
1726 * All operations take 2 sources except CompSwap that takes 3. These
1727 * sources represent:
1728 *
1729 * 0: The offset into the shared variable storage region that the atomic
1730 * operation will operate on.
1731 * 1: The data parameter to the atomic function (i.e. the value to add
1732 * in shared_atomic_add, etc).
1733 * 2: For CompSwap only: the second data parameter.
1734 */
1735 static struct ir3_instruction *
1736 emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1737 {
1738 struct ir3_block *b = ctx->block;
1739 struct ir3_instruction *atomic, *src0, *src1;
1740 type_t type = TYPE_U32;
1741
1742 src0 = get_src(ctx, &intr->src[0])[0]; /* offset */
1743 src1 = get_src(ctx, &intr->src[1])[0]; /* value */
1744
1745 switch (intr->intrinsic) {
1746 case nir_intrinsic_shared_atomic_add:
1747 atomic = ir3_ATOMIC_ADD(b, src0, 0, src1, 0);
1748 break;
1749 case nir_intrinsic_shared_atomic_imin:
1750 atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
1751 type = TYPE_S32;
1752 break;
1753 case nir_intrinsic_shared_atomic_umin:
1754 atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
1755 break;
1756 case nir_intrinsic_shared_atomic_imax:
1757 atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
1758 type = TYPE_S32;
1759 break;
1760 case nir_intrinsic_shared_atomic_umax:
1761 atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
1762 break;
1763 case nir_intrinsic_shared_atomic_and:
1764 atomic = ir3_ATOMIC_AND(b, src0, 0, src1, 0);
1765 break;
1766 case nir_intrinsic_shared_atomic_or:
1767 atomic = ir3_ATOMIC_OR(b, src0, 0, src1, 0);
1768 break;
1769 case nir_intrinsic_shared_atomic_xor:
1770 atomic = ir3_ATOMIC_XOR(b, src0, 0, src1, 0);
1771 break;
1772 case nir_intrinsic_shared_atomic_exchange:
1773 atomic = ir3_ATOMIC_XCHG(b, src0, 0, src1, 0);
1774 break;
1775 case nir_intrinsic_shared_atomic_comp_swap:
1776 /* for cmpxchg, src1 is [ui]vec2(data, compare): */
1777 src1 = create_collect(ctx, (struct ir3_instruction*[]){
1778 get_src(ctx, &intr->src[2])[0],
1779 src1,
1780 }, 2);
1781 atomic = ir3_ATOMIC_CMPXCHG(b, src0, 0, src1, 0);
1782 break;
1783 default:
1784 unreachable("boo");
1785 }
1786
1787 atomic->cat6.iim_val = 1;
1788 atomic->cat6.d = 1;
1789 atomic->cat6.type = type;
1790 atomic->barrier_class = IR3_BARRIER_SHARED_W;
1791 atomic->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1792
1793 /* even if nothing consumes the result, we can't DCE the instruction: */
1794 array_insert(b, b->keeps, atomic);
1795
1796 return atomic;
1797 }
1798
1799 /* Images get mapped into SSBO/image state (for store/atomic) and texture
1800 * state block (for load). To simplify things, invert the image id and
1801 * map it from end of state block, ie. image 0 becomes num-1, image 1
1802 * becomes num-2, etc. This potentially avoids needing to re-emit texture
1803 * state when switching shaders.
1804 *
1805 * TODO is max # of samplers and SSBOs the same? This shouldn't be hard-
1806 * coded. Also, since all the gl shader stages (ie. everything but CS)
1807 * share the same SSBO/image state block, this might require some more
1808 * logic if we supported images in anything other than FS..
1809 */
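/* e.g. with the max_samplers of 16 hardcoded below, image 0 lands in
 * slot 15, image 1 in slot 14, and so on.
 */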
1810 static unsigned
1811 get_image_slot(struct ir3_context *ctx, nir_deref_instr *deref)
1812 {
1813 unsigned int loc = 0;
1814 unsigned inner_size = 1;
1815
1816 while (deref->deref_type != nir_deref_type_var) {
1817 assert(deref->deref_type == nir_deref_type_array);
1818 nir_const_value *const_index = nir_src_as_const_value(deref->arr.index);
1819 assert(const_index);
1820
1821 /* Step to the parent deref instruction */
1822 deref = nir_deref_instr_parent(deref);
1823
1824 assert(glsl_type_is_array(deref->type));
1825 const unsigned array_len = glsl_get_length(deref->type);
1826 loc += MIN2(const_index->u32[0], array_len - 1) * inner_size;
1827
1828 /* Update the inner size */
1829 inner_size *= array_len;
1830 }
1831
1832 loc += deref->var->data.driver_location;
1833
1834 /* TODO figure out real limit per generation, and don't hardcode: */
1835 const unsigned max_samplers = 16;
1836 return max_samplers - loc - 1;
1837 }
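/* Worked example (illustrative only, assuming the hard-coded max of 16):
 * a lone image at driver_location 2 maps to slot 16 - 2 - 1 = 13, and
 * element 3 of an image array img[4] at driver_location 0 gives loc = 3,
 * hence slot 16 - 3 - 1 = 12.
 */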
1838
1839 /* see tex_info() for equiv logic for texture instructions.. it would be
1840 * nice if this could be better unified..
1841 */
1842 static unsigned
1843 get_image_coords(const nir_variable *var, unsigned *flagsp)
1844 {
1845 const struct glsl_type *type = glsl_without_array(var->type);
1846 unsigned coords, flags = 0;
1847
1848 switch (glsl_get_sampler_dim(type)) {
1849 case GLSL_SAMPLER_DIM_1D:
1850 case GLSL_SAMPLER_DIM_BUF:
1851 coords = 1;
1852 break;
1853 case GLSL_SAMPLER_DIM_2D:
1854 case GLSL_SAMPLER_DIM_RECT:
1855 case GLSL_SAMPLER_DIM_EXTERNAL:
1856 case GLSL_SAMPLER_DIM_MS:
1857 coords = 2;
1858 break;
1859 case GLSL_SAMPLER_DIM_3D:
1860 case GLSL_SAMPLER_DIM_CUBE:
1861 flags |= IR3_INSTR_3D;
1862 coords = 3;
1863 break;
1864 default:
1865 unreachable("bad sampler dim");
1866 return 0;
1867 }
1868
1869 if (glsl_sampler_type_is_array(type)) {
1870 /* note: unlike tex_info(), adjust # of coords to include array idx: */
1871 coords++;
1872 flags |= IR3_INSTR_A;
1873 }
1874
1875 if (flagsp)
1876 *flagsp = flags;
1877
1878 return coords;
1879 }
1880
1881 static type_t
1882 get_image_type(const nir_variable *var)
1883 {
1884 switch (glsl_get_sampler_result_type(glsl_without_array(var->type))) {
1885 case GLSL_TYPE_UINT:
1886 return TYPE_U32;
1887 case GLSL_TYPE_INT:
1888 return TYPE_S32;
1889 case GLSL_TYPE_FLOAT:
1890 return TYPE_F32;
1891 default:
1892 unreachable("bad sampler type.");
1893 return 0;
1894 }
1895 }
1896
1897 static struct ir3_instruction *
1898 get_image_offset(struct ir3_context *ctx, const nir_variable *var,
1899 struct ir3_instruction * const *coords, bool byteoff)
1900 {
1901 struct ir3_block *b = ctx->block;
1902 struct ir3_instruction *offset;
1903 unsigned ncoords = get_image_coords(var, NULL);
1904
1905 /* to calculate the byte offset (yes, uggg) we need (up to) three
1906 * const values to know the bytes per pixel, and y and z stride:
1907 */
1908 unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
1909 ctx->so->const_layout.image_dims.off[var->data.driver_location];
1910
1911 debug_assert(ctx->so->const_layout.image_dims.mask &
1912 (1 << var->data.driver_location));
1913
1914 /* offset = coords.x * bytes_per_pixel: */
1915 offset = ir3_MUL_S(b, coords[0], 0, create_uniform(ctx, cb + 0), 0);
1916 if (ncoords > 1) {
1917 /* offset += coords.y * y_pitch: */
1918 offset = ir3_MAD_S24(b, create_uniform(ctx, cb + 1), 0,
1919 coords[1], 0, offset, 0);
1920 }
1921 if (ncoords > 2) {
1922 /* offset += coords.z * z_pitch: */
1923 offset = ir3_MAD_S24(b, create_uniform(ctx, cb + 2), 0,
1924 coords[2], 0, offset, 0);
1925 }
1926
1927 if (!byteoff) {
1928 /* Some cases, like atomics, seem to use a dword offset instead
1929 * of a byte offset.. the blob just puts an extra shr.b in there
1930 * in those cases:
1931 */
1932 offset = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
1933 }
1934
1935 return create_collect(ctx, (struct ir3_instruction*[]){
1936 offset,
1937 create_immed(b, 0),
1938 }, 2);
1939 }
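/* Sketch of the math above (the numbers are made up for illustration):
 * for a 2D image with 4 bytes per pixel and a y_pitch of 256 bytes,
 * texel (x=3, y=2) gives a byte offset of 3*4 + 2*256 = 524, or a dword
 * offset of 524 >> 2 = 131 in the atomic case.  The collect pairs the
 * offset with 0 to form the 64b offset the instruction expects.
 */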
1940
1941 /* src[] = { deref, coord, sample_index }. const_index[] = {} */
1942 static void
1943 emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1944 struct ir3_instruction **dst)
1945 {
1946 struct ir3_block *b = ctx->block;
1947 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
1948 struct ir3_instruction *sam;
1949 struct ir3_instruction * const *src0 = get_src(ctx, &intr->src[1]);
1950 struct ir3_instruction *coords[4];
1951 unsigned flags, ncoords = get_image_coords(var, &flags);
1952 unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
1953 type_t type = get_image_type(var);
1954
1955 /* hmm, this seems a bit odd, but it is what the blob does, and (at least
1956 * on a5xx) the hw just faults on bogus addresses otherwise:
1957 */
1958 if (flags & IR3_INSTR_3D) {
1959 flags &= ~IR3_INSTR_3D;
1960 flags |= IR3_INSTR_A;
1961 }
1962
1963 for (unsigned i = 0; i < ncoords; i++)
1964 coords[i] = src0[i];
1965
1966 if (ncoords == 1)
1967 coords[ncoords++] = create_immed(b, 0);
1968
1969 sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags,
1970 tex_idx, tex_idx, create_collect(ctx, coords, ncoords), NULL);
1971
1972 sam->barrier_class = IR3_BARRIER_IMAGE_R;
1973 sam->barrier_conflict = IR3_BARRIER_IMAGE_W;
1974
1975 split_dest(b, dst, sam, 0, 4);
1976 }
1977
1978 /* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
1979 static void
1980 emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1981 {
1982 struct ir3_block *b = ctx->block;
1983 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
1984 struct ir3_instruction *stib, *offset;
1985 struct ir3_instruction * const *value = get_src(ctx, &intr->src[3]);
1986 struct ir3_instruction * const *coords = get_src(ctx, &intr->src[1]);
1987 unsigned ncoords = get_image_coords(var, NULL);
1988 unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
1989
1990 /* src0 is value
1991 * src1 is coords
1992 * src2 is 64b byte offset
1993 */
1994
1995 offset = get_image_offset(ctx, var, coords, true);
1996
1997 /* NOTE: stib seems to take byte offset, but stgb.typed can be used
1998 * too and takes a dword offset.. not quite sure yet why blob uses
1999 * one over the other in various cases.
2000 */
2001
2002 stib = ir3_STIB(b, create_immed(b, tex_idx), 0,
2003 create_collect(ctx, value, 4), 0,
2004 create_collect(ctx, coords, ncoords), 0,
2005 offset, 0);
2006 stib->cat6.iim_val = 4;
2007 stib->cat6.d = ncoords;
2008 stib->cat6.type = get_image_type(var);
2009 stib->cat6.typed = true;
2010 stib->barrier_class = IR3_BARRIER_IMAGE_W;
2011 stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
2012
2013 array_insert(b, b->keeps, stib);
2014 }
2015
2016 static void
2017 emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
2018 struct ir3_instruction **dst)
2019 {
2020 struct ir3_block *b = ctx->block;
2021 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
2022 unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
2023 struct ir3_instruction *sam, *lod;
2024 unsigned flags, ncoords = get_image_coords(var, &flags);
2025
2026 lod = create_immed(b, 0);
2027 sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
2028 tex_idx, tex_idx, lod, NULL);
2029
2030 /* Array size actually ends up in .w rather than .z. This doesn't
2031 * matter for miplevel 0, but for higher mips the value in z is
2032 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
2033 * returned, which means that we have to add 1 to it for arrays for
2034 * a3xx.
2035 *
2036 * Note: use a temporary dst and then copy, since the size of the dst
2037 * array that is passed in is based on nir's understanding of the
2038 * result size, not the hardware's.
2039 */
2040 struct ir3_instruction *tmp[4];
2041
2042 split_dest(b, tmp, sam, 0, 4);
2043
2044 /* get_size instruction returns size in bytes instead of texels
2045 * for imageBuffer, so we need to divide it by the pixel size
2046 * of the image format.
2047 *
2048 * TODO: This is at least true on a5xx. Check other gens.
2049 */
2050 enum glsl_sampler_dim dim =
2051 glsl_get_sampler_dim(glsl_without_array(var->type));
2052 if (dim == GLSL_SAMPLER_DIM_BUF) {
2053 /* Since all the possible values the divisor can take are
2054 * powers of two (4, 8, or 16), the division is implemented
2055 * as a shift-right.
2056 * During shader setup, the log2 of the image format's
2057 * bytes-per-pixel should have been emitted in the 2nd slot of
2058 * image_dims. See ir3_shader::emit_image_dims().
2059 */
2060 unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
2061 ctx->so->const_layout.image_dims.off[var->data.driver_location];
2062 struct ir3_instruction *aux = create_uniform(ctx, cb + 1);
2063
2064 tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0);
2065 }
2066
2067 for (unsigned i = 0; i < ncoords; i++)
2068 dst[i] = tmp[i];
2069
2070 if (flags & IR3_INSTR_A) {
2071 if (ctx->compiler->levels_add_one) {
2072 dst[ncoords-1] = ir3_ADD_U(b, tmp[3], 0, create_immed(b, 1), 0);
2073 } else {
2074 dst[ncoords-1] = ir3_MOV(b, tmp[3], TYPE_U32);
2075 }
2076 }
2077 }
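/* Illustrative example for the imageBuffer case above (format and sizes
 * are assumptions): a 4096 byte buffer bound with a 16 bytes-per-pixel
 * format such as rgba32f would report 4096 >> 4 = 256 texels after the
 * shift.
 */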
2078
2079 /* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
2080 static struct ir3_instruction *
2081 emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
2082 {
2083 struct ir3_block *b = ctx->block;
2084 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
2085 struct ir3_instruction *atomic, *image, *src0, *src1, *src2;
2086 struct ir3_instruction * const *coords = get_src(ctx, &intr->src[1]);
2087 unsigned ncoords = get_image_coords(var, NULL);
2088
2089 image = create_immed(b, get_image_slot(ctx, nir_src_as_deref(intr->src[0])));
2090
2091 /* src0 is value (or uvec2(value, compare))
2092 * src1 is coords
2093 * src2 is 64b byte offset
2094 */
2095 src0 = get_src(ctx, &intr->src[3])[0];
2096 src1 = create_collect(ctx, coords, ncoords);
2097 src2 = get_image_offset(ctx, var, coords, false);
2098
2099 switch (intr->intrinsic) {
2100 case nir_intrinsic_image_deref_atomic_add:
2101 atomic = ir3_ATOMIC_ADD_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2102 break;
2103 case nir_intrinsic_image_deref_atomic_min:
2104 atomic = ir3_ATOMIC_MIN_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2105 break;
2106 case nir_intrinsic_image_deref_atomic_max:
2107 atomic = ir3_ATOMIC_MAX_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2108 break;
2109 case nir_intrinsic_image_deref_atomic_and:
2110 atomic = ir3_ATOMIC_AND_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2111 break;
2112 case nir_intrinsic_image_deref_atomic_or:
2113 atomic = ir3_ATOMIC_OR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2114 break;
2115 case nir_intrinsic_image_deref_atomic_xor:
2116 atomic = ir3_ATOMIC_XOR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2117 break;
2118 case nir_intrinsic_image_deref_atomic_exchange:
2119 atomic = ir3_ATOMIC_XCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2120 break;
2121 case nir_intrinsic_image_deref_atomic_comp_swap:
2122 /* for cmpxchg, src0 is [ui]vec2(data, compare): */
2123 src0 = create_collect(ctx, (struct ir3_instruction*[]){
2124 get_src(ctx, &intr->src[4])[0],
2125 src0,
2126 }, 2);
2127 atomic = ir3_ATOMIC_CMPXCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
2128 break;
2129 default:
2130 unreachable("boo");
2131 }
2132
2133 atomic->cat6.iim_val = 1;
2134 atomic->cat6.d = ncoords;
2135 atomic->cat6.type = get_image_type(var);
2136 atomic->cat6.typed = true;
2137 atomic->barrier_class = IR3_BARRIER_IMAGE_W;
2138 atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
2139
2140 /* even if nothing consumes the result, we can't DCE the instruction: */
2141 array_insert(b, b->keeps, atomic);
2142
2143 return atomic;
2144 }
2145
2146 static void
2147 emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
2148 {
2149 struct ir3_block *b = ctx->block;
2150 struct ir3_instruction *barrier;
2151
2152 switch (intr->intrinsic) {
2153 case nir_intrinsic_barrier:
2154 barrier = ir3_BAR(b);
2155 barrier->cat7.g = true;
2156 barrier->cat7.l = true;
2157 barrier->flags = IR3_INSTR_SS | IR3_INSTR_SY;
2158 barrier->barrier_class = IR3_BARRIER_EVERYTHING;
2159 break;
2160 case nir_intrinsic_memory_barrier:
2161 barrier = ir3_FENCE(b);
2162 barrier->cat7.g = true;
2163 barrier->cat7.r = true;
2164 barrier->cat7.w = true;
2165 barrier->barrier_class = IR3_BARRIER_IMAGE_W |
2166 IR3_BARRIER_BUFFER_W;
2167 barrier->barrier_conflict =
2168 IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
2169 IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
2170 break;
2171 case nir_intrinsic_memory_barrier_atomic_counter:
2172 case nir_intrinsic_memory_barrier_buffer:
2173 barrier = ir3_FENCE(b);
2174 barrier->cat7.g = true;
2175 barrier->cat7.r = true;
2176 barrier->cat7.w = true;
2177 barrier->barrier_class = IR3_BARRIER_BUFFER_W;
2178 barrier->barrier_conflict = IR3_BARRIER_BUFFER_R |
2179 IR3_BARRIER_BUFFER_W;
2180 break;
2181 case nir_intrinsic_memory_barrier_image:
2182 // TODO double check if this should have .g set
2183 barrier = ir3_FENCE(b);
2184 barrier->cat7.g = true;
2185 barrier->cat7.r = true;
2186 barrier->cat7.w = true;
2187 barrier->barrier_class = IR3_BARRIER_IMAGE_W;
2188 barrier->barrier_conflict = IR3_BARRIER_IMAGE_R |
2189 IR3_BARRIER_IMAGE_W;
2190 break;
2191 case nir_intrinsic_memory_barrier_shared:
2192 barrier = ir3_FENCE(b);
2193 barrier->cat7.g = true;
2194 barrier->cat7.l = true;
2195 barrier->cat7.r = true;
2196 barrier->cat7.w = true;
2197 barrier->barrier_class = IR3_BARRIER_SHARED_W;
2198 barrier->barrier_conflict = IR3_BARRIER_SHARED_R |
2199 IR3_BARRIER_SHARED_W;
2200 break;
2201 case nir_intrinsic_group_memory_barrier:
2202 barrier = ir3_FENCE(b);
2203 barrier->cat7.g = true;
2204 barrier->cat7.l = true;
2205 barrier->cat7.r = true;
2206 barrier->cat7.w = true;
2207 barrier->barrier_class = IR3_BARRIER_SHARED_W |
2208 IR3_BARRIER_IMAGE_W |
2209 IR3_BARRIER_BUFFER_W;
2210 barrier->barrier_conflict =
2211 IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W |
2212 IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
2213 IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
2214 break;
2215 default:
2216 unreachable("boo");
2217 }
2218
2219 /* make sure barrier doesn't get DCE'd */
2220 array_insert(b, b->keeps, barrier);
2221 }
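/* Rough sketch of how the barrier_class/barrier_conflict pair is meant
 * to be used (see ir3_sched_add_deps()): two instructions may not be
 * reordered past each other if one's barrier_class intersects the
 * other's barrier_conflict.  So, e.g., a memory_barrier_buffer fence
 * (class BUFFER_W, conflict BUFFER_R|BUFFER_W) pins SSBO access in
 * place but lets shared-memory instructions move freely across it.
 */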
2222
2223 static void add_sysval_input_compmask(struct ir3_context *ctx,
2224 gl_system_value slot, unsigned compmask,
2225 struct ir3_instruction *instr)
2226 {
2227 struct ir3_shader_variant *so = ctx->so;
2228 unsigned r = regid(so->inputs_count, 0);
2229 unsigned n = so->inputs_count++;
2230
2231 so->inputs[n].sysval = true;
2232 so->inputs[n].slot = slot;
2233 so->inputs[n].compmask = compmask;
2234 so->inputs[n].regid = r;
2235 so->inputs[n].interpolate = INTERP_MODE_FLAT;
2236 so->total_in++;
2237
2238 ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1);
2239 ctx->ir->inputs[r] = instr;
2240 }
2241
2242 static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot,
2243 struct ir3_instruction *instr)
2244 {
2245 add_sysval_input_compmask(ctx, slot, 0x1, instr);
2246 }
2247
2248 static void
2249 emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
2250 {
2251 const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
2252 struct ir3_instruction **dst;
2253 struct ir3_instruction * const *src;
2254 struct ir3_block *b = ctx->block;
2255 nir_const_value *const_offset;
2256 int idx, comp;
2257
2258 if (info->has_dest) {
2259 unsigned n = nir_intrinsic_dest_components(intr);
2260 dst = get_dst(ctx, &intr->dest, n);
2261 } else {
2262 dst = NULL;
2263 }
2264
2265 switch (intr->intrinsic) {
2266 case nir_intrinsic_load_uniform:
2267 idx = nir_intrinsic_base(intr);
2268 const_offset = nir_src_as_const_value(intr->src[0]);
2269 if (const_offset) {
2270 idx += const_offset->u32[0];
2271 for (int i = 0; i < intr->num_components; i++) {
2272 unsigned n = idx * 4 + i;
2273 dst[i] = create_uniform(ctx, n);
2274 }
2275 } else {
2276 src = get_src(ctx, &intr->src[0]);
2277 for (int i = 0; i < intr->num_components; i++) {
2278 int n = idx * 4 + i;
2279 dst[i] = create_uniform_indirect(ctx, n,
2280 get_addr(ctx, src[0], 4));
2281 }
2282 /* NOTE: if relative addressing is used, we set
2283 * constlen in the compiler (to worst-case value)
2284 * since we don't know in the assembler what the max
2285 * addr reg value can be:
2286 */
2287 ctx->so->constlen = ctx->s->num_uniforms;
2288 }
2289 break;
2290 case nir_intrinsic_load_ubo:
2291 emit_intrinsic_load_ubo(ctx, intr, dst);
2292 break;
2293 case nir_intrinsic_load_input:
2294 idx = nir_intrinsic_base(intr);
2295 comp = nir_intrinsic_component(intr);
2296 const_offset = nir_src_as_const_value(intr->src[0]);
2297 if (const_offset) {
2298 idx += const_offset->u32[0];
2299 for (int i = 0; i < intr->num_components; i++) {
2300 unsigned n = idx * 4 + i + comp;
2301 dst[i] = ctx->ir->inputs[n];
2302 }
2303 } else {
2304 src = get_src(ctx, &intr->src[0]);
2305 struct ir3_instruction *collect =
2306 create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs);
2307 struct ir3_instruction *addr = get_addr(ctx, src[0], 4);
2308 for (int i = 0; i < intr->num_components; i++) {
2309 unsigned n = idx * 4 + i + comp;
2310 dst[i] = create_indirect_load(ctx, ctx->ir->ninputs,
2311 n, addr, collect);
2312 }
2313 }
2314 break;
2315 case nir_intrinsic_load_ssbo:
2316 emit_intrinsic_load_ssbo(ctx, intr, dst);
2317 break;
2318 case nir_intrinsic_store_ssbo:
2319 emit_intrinsic_store_ssbo(ctx, intr);
2320 break;
2321 case nir_intrinsic_get_buffer_size:
2322 emit_intrinsic_ssbo_size(ctx, intr, dst);
2323 break;
2324 case nir_intrinsic_ssbo_atomic_add:
2325 case nir_intrinsic_ssbo_atomic_imin:
2326 case nir_intrinsic_ssbo_atomic_umin:
2327 case nir_intrinsic_ssbo_atomic_imax:
2328 case nir_intrinsic_ssbo_atomic_umax:
2329 case nir_intrinsic_ssbo_atomic_and:
2330 case nir_intrinsic_ssbo_atomic_or:
2331 case nir_intrinsic_ssbo_atomic_xor:
2332 case nir_intrinsic_ssbo_atomic_exchange:
2333 case nir_intrinsic_ssbo_atomic_comp_swap:
2334 dst[0] = emit_intrinsic_atomic_ssbo(ctx, intr);
2335 break;
2336 case nir_intrinsic_load_shared:
2337 emit_intrinsic_load_shared(ctx, intr, dst);
2338 break;
2339 case nir_intrinsic_store_shared:
2340 emit_intrinsic_store_shared(ctx, intr);
2341 break;
2342 case nir_intrinsic_shared_atomic_add:
2343 case nir_intrinsic_shared_atomic_imin:
2344 case nir_intrinsic_shared_atomic_umin:
2345 case nir_intrinsic_shared_atomic_imax:
2346 case nir_intrinsic_shared_atomic_umax:
2347 case nir_intrinsic_shared_atomic_and:
2348 case nir_intrinsic_shared_atomic_or:
2349 case nir_intrinsic_shared_atomic_xor:
2350 case nir_intrinsic_shared_atomic_exchange:
2351 case nir_intrinsic_shared_atomic_comp_swap:
2352 dst[0] = emit_intrinsic_atomic_shared(ctx, intr);
2353 break;
2354 case nir_intrinsic_image_deref_load:
2355 emit_intrinsic_load_image(ctx, intr, dst);
2356 break;
2357 case nir_intrinsic_image_deref_store:
2358 emit_intrinsic_store_image(ctx, intr);
2359 break;
2360 case nir_intrinsic_image_deref_size:
2361 emit_intrinsic_image_size(ctx, intr, dst);
2362 break;
2363 case nir_intrinsic_image_deref_atomic_add:
2364 case nir_intrinsic_image_deref_atomic_min:
2365 case nir_intrinsic_image_deref_atomic_max:
2366 case nir_intrinsic_image_deref_atomic_and:
2367 case nir_intrinsic_image_deref_atomic_or:
2368 case nir_intrinsic_image_deref_atomic_xor:
2369 case nir_intrinsic_image_deref_atomic_exchange:
2370 case nir_intrinsic_image_deref_atomic_comp_swap:
2371 dst[0] = emit_intrinsic_atomic_image(ctx, intr);
2372 break;
2373 case nir_intrinsic_barrier:
2374 case nir_intrinsic_memory_barrier:
2375 case nir_intrinsic_group_memory_barrier:
2376 case nir_intrinsic_memory_barrier_atomic_counter:
2377 case nir_intrinsic_memory_barrier_buffer:
2378 case nir_intrinsic_memory_barrier_image:
2379 case nir_intrinsic_memory_barrier_shared:
2380 emit_intrinsic_barrier(ctx, intr);
2381 /* note that the blk ptr is no longer valid, make that obvious: */
2382 b = NULL;
2383 break;
2384 case nir_intrinsic_store_output:
2385 idx = nir_intrinsic_base(intr);
2386 comp = nir_intrinsic_component(intr);
2387 const_offset = nir_src_as_const_value(intr->src[1]);
2388 compile_assert(ctx, const_offset != NULL);
2389 idx += const_offset->u32[0];
2390
2391 src = get_src(ctx, &intr->src[0]);
2392 for (int i = 0; i < intr->num_components; i++) {
2393 unsigned n = idx * 4 + i + comp;
2394 ctx->ir->outputs[n] = src[i];
2395 }
2396 break;
2397 case nir_intrinsic_load_base_vertex:
2398 case nir_intrinsic_load_first_vertex:
2399 if (!ctx->basevertex) {
2400 ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE);
2401 add_sysval_input(ctx, SYSTEM_VALUE_FIRST_VERTEX, ctx->basevertex);
2402 }
2403 dst[0] = ctx->basevertex;
2404 break;
2405 case nir_intrinsic_load_vertex_id_zero_base:
2406 case nir_intrinsic_load_vertex_id:
2407 if (!ctx->vertex_id) {
2408 gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ?
2409 SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
2410 ctx->vertex_id = create_input(ctx, 0);
2411 add_sysval_input(ctx, sv, ctx->vertex_id);
2412 }
2413 dst[0] = ctx->vertex_id;
2414 break;
2415 case nir_intrinsic_load_instance_id:
2416 if (!ctx->instance_id) {
2417 ctx->instance_id = create_input(ctx, 0);
2418 add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID,
2419 ctx->instance_id);
2420 }
2421 dst[0] = ctx->instance_id;
2422 break;
2423 case nir_intrinsic_load_sample_id:
2424 case nir_intrinsic_load_sample_id_no_per_sample:
2425 if (!ctx->samp_id) {
2426 ctx->samp_id = create_input(ctx, 0);
2427 ctx->samp_id->regs[0]->flags |= IR3_REG_HALF;
2428 add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID,
2429 ctx->samp_id);
2430 }
2431 dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32);
2432 break;
2433 case nir_intrinsic_load_sample_mask_in:
2434 if (!ctx->samp_mask_in) {
2435 ctx->samp_mask_in = create_input(ctx, 0);
2436 add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN,
2437 ctx->samp_mask_in);
2438 }
2439 dst[0] = ctx->samp_mask_in;
2440 break;
2441 case nir_intrinsic_load_user_clip_plane:
2442 idx = nir_intrinsic_ucp_id(intr);
2443 for (int i = 0; i < intr->num_components; i++) {
2444 unsigned n = idx * 4 + i;
2445 dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
2446 }
2447 break;
2448 case nir_intrinsic_load_front_face:
2449 if (!ctx->frag_face) {
2450 ctx->so->frag_face = true;
2451 ctx->frag_face = create_input(ctx, 0);
2452 add_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, ctx->frag_face);
2453 ctx->frag_face->regs[0]->flags |= IR3_REG_HALF;
2454 }
2455 /* for fragface, we get -1 for back and 0 for front. However this is
2456 * the inverse of what nir expects (where ~0 is true).
2457 */
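/* (Illustrative: front-facing 0 stays 0 after the sign-extending cov,
 * and not.b turns that into ~0/true; back-facing -1 becomes 0xffffffff
 * and not.b turns it into 0/false, which is what NIR wants.)
 */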
2458 dst[0] = ir3_COV(b, ctx->frag_face, TYPE_S16, TYPE_S32);
2459 dst[0] = ir3_NOT_B(b, dst[0], 0);
2460 break;
2461 case nir_intrinsic_load_local_invocation_id:
2462 if (!ctx->local_invocation_id) {
2463 ctx->local_invocation_id = create_input_compmask(ctx, 0, 0x7);
2464 add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID,
2465 0x7, ctx->local_invocation_id);
2466 }
2467 split_dest(b, dst, ctx->local_invocation_id, 0, 3);
2468 break;
2469 case nir_intrinsic_load_work_group_id:
2470 if (!ctx->work_group_id) {
2471 ctx->work_group_id = create_input_compmask(ctx, 0, 0x7);
2472 add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID,
2473 0x7, ctx->work_group_id);
2474 ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH;
2475 }
2476 split_dest(b, dst, ctx->work_group_id, 0, 3);
2477 break;
2478 case nir_intrinsic_load_num_work_groups:
2479 for (int i = 0; i < intr->num_components; i++) {
2480 dst[i] = create_driver_param(ctx, IR3_DP_NUM_WORK_GROUPS_X + i);
2481 }
2482 break;
2483 case nir_intrinsic_load_local_group_size:
2484 for (int i = 0; i < intr->num_components; i++) {
2485 dst[i] = create_driver_param(ctx, IR3_DP_LOCAL_GROUP_SIZE_X + i);
2486 }
2487 break;
2488 case nir_intrinsic_discard_if:
2489 case nir_intrinsic_discard: {
2490 struct ir3_instruction *cond, *kill;
2491
2492 if (intr->intrinsic == nir_intrinsic_discard_if) {
2493 /* conditional discard: */
2494 src = get_src(ctx, &intr->src[0]);
2495 cond = ir3_b2n(b, src[0]);
2496 } else {
2497 /* unconditional discard: */
2498 cond = create_immed(b, 1);
2499 }
2500
2501 /* NOTE: only cmps.*.* can write p0.x: */
2502 cond = ir3_CMPS_S(b, cond, 0, create_immed(b, 0), 0);
2503 cond->cat2.condition = IR3_COND_NE;
2504
2505 /* condition always goes in predicate register: */
2506 cond->regs[0]->num = regid(REG_P0, 0);
2507
2508 kill = ir3_KILL(b, cond, 0);
2509 array_insert(ctx->ir, ctx->ir->predicates, kill);
2510
2511 array_insert(b, b->keeps, kill);
2512 ctx->so->has_kill = true;
2513
2514 break;
2515 }
2516 default:
2517 compile_error(ctx, "Unhandled intrinsic type: %s\n",
2518 nir_intrinsic_infos[intr->intrinsic].name);
2519 break;
2520 }
2521
2522 if (info->has_dest)
2523 put_dst(ctx, &intr->dest);
2524 }
2525
2526 static void
2527 emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
2528 {
2529 struct ir3_instruction **dst = get_dst_ssa(ctx, &instr->def,
2530 instr->def.num_components);
2531 type_t type = (instr->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;
2532
2533 for (int i = 0; i < instr->def.num_components; i++)
2534 dst[i] = create_immed_typed(ctx->block, instr->value.u32[i], type);
2535 }
2536
2537 static void
2538 emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
2539 {
2540 struct ir3_instruction **dst = get_dst_ssa(ctx, &undef->def,
2541 undef->def.num_components);
2542 type_t type = (undef->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;
2543
2544 /* backend doesn't want undefined instructions, so just plug
2545 * in 0.0..
2546 */
2547 for (int i = 0; i < undef->def.num_components; i++)
2548 dst[i] = create_immed_typed(ctx->block, fui(0.0), type);
2549 }
2550
2551 /*
2552 * texture fetch/sample instructions:
2553 */
2554
2555 static void
2556 tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
2557 {
2558 unsigned coords, flags = 0;
2559
2560 /* note: would use tex->coord_components.. except txs.. also,
2561 * since array index goes after shadow ref, we don't want to
2562 * count it:
2563 */
2564 switch (tex->sampler_dim) {
2565 case GLSL_SAMPLER_DIM_1D:
2566 case GLSL_SAMPLER_DIM_BUF:
2567 coords = 1;
2568 break;
2569 case GLSL_SAMPLER_DIM_2D:
2570 case GLSL_SAMPLER_DIM_RECT:
2571 case GLSL_SAMPLER_DIM_EXTERNAL:
2572 case GLSL_SAMPLER_DIM_MS:
2573 coords = 2;
2574 break;
2575 case GLSL_SAMPLER_DIM_3D:
2576 case GLSL_SAMPLER_DIM_CUBE:
2577 coords = 3;
2578 flags |= IR3_INSTR_3D;
2579 break;
2580 default:
2581 unreachable("bad sampler_dim");
2582 }
2583
2584 if (tex->is_shadow && tex->op != nir_texop_lod)
2585 flags |= IR3_INSTR_S;
2586
2587 if (tex->is_array && tex->op != nir_texop_lod)
2588 flags |= IR3_INSTR_A;
2589
2590 *flagsp = flags;
2591 *coordsp = coords;
2592 }
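/* Example of the mapping above (illustrative): a sampler2DArrayShadow
 * lookup reports coords = 2 with IR3_INSTR_S | IR3_INSTR_A set; the
 * shadow ref and array index themselves get appended after the coords
 * later, in emit_tex().
 */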
2593
2594 static void
2595 emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
2596 {
2597 struct ir3_block *b = ctx->block;
2598 struct ir3_instruction **dst, *sam, *src0[12], *src1[4];
2599 struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
2600 struct ir3_instruction *lod, *compare, *proj, *sample_index;
2601 bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
2602 unsigned i, coords, flags;
2603 unsigned nsrc0 = 0, nsrc1 = 0;
2604 type_t type;
2605 opc_t opc = 0;
2606
2607 coord = off = ddx = ddy = NULL;
2608 lod = proj = compare = sample_index = NULL;
2609
2610 /* TODO: might just be one component for gathers? */
2611 dst = get_dst(ctx, &tex->dest, 4);
2612
2613 for (unsigned i = 0; i < tex->num_srcs; i++) {
2614 switch (tex->src[i].src_type) {
2615 case nir_tex_src_coord:
2616 coord = get_src(ctx, &tex->src[i].src);
2617 break;
2618 case nir_tex_src_bias:
2619 lod = get_src(ctx, &tex->src[i].src)[0];
2620 has_bias = true;
2621 break;
2622 case nir_tex_src_lod:
2623 lod = get_src(ctx, &tex->src[i].src)[0];
2624 has_lod = true;
2625 break;
2626 case nir_tex_src_comparator: /* shadow comparator */
2627 compare = get_src(ctx, &tex->src[i].src)[0];
2628 break;
2629 case nir_tex_src_projector:
2630 proj = get_src(ctx, &tex->src[i].src)[0];
2631 has_proj = true;
2632 break;
2633 case nir_tex_src_offset:
2634 off = get_src(ctx, &tex->src[i].src);
2635 has_off = true;
2636 break;
2637 case nir_tex_src_ddx:
2638 ddx = get_src(ctx, &tex->src[i].src);
2639 break;
2640 case nir_tex_src_ddy:
2641 ddy = get_src(ctx, &tex->src[i].src);
2642 break;
2643 case nir_tex_src_ms_index:
2644 sample_index = get_src(ctx, &tex->src[i].src)[0];
2645 break;
2646 default:
2647 compile_error(ctx, "Unhandled NIR tex src type: %d\n",
2648 tex->src[i].src_type);
2649 return;
2650 }
2651 }
2652
2653 switch (tex->op) {
2654 case nir_texop_tex: opc = has_lod ? OPC_SAML : OPC_SAM; break;
2655 case nir_texop_txb: opc = OPC_SAMB; break;
2656 case nir_texop_txl: opc = OPC_SAML; break;
2657 case nir_texop_txd: opc = OPC_SAMGQ; break;
2658 case nir_texop_txf: opc = OPC_ISAML; break;
2659 case nir_texop_lod: opc = OPC_GETLOD; break;
2660 case nir_texop_tg4:
2661 /* NOTE: a4xx might need to emulate gather w/ txf (this is
2662 * what blob does, seems gather is broken?), and a3xx did
2663 * not support it (but probably could also emulate).
2664 */
2665 switch (tex->component) {
2666 case 0: opc = OPC_GATHER4R; break;
2667 case 1: opc = OPC_GATHER4G; break;
2668 case 2: opc = OPC_GATHER4B; break;
2669 case 3: opc = OPC_GATHER4A; break;
2670 }
2671 break;
2672 case nir_texop_txf_ms: opc = OPC_ISAMM; break;
2673 case nir_texop_txs:
2674 case nir_texop_query_levels:
2675 case nir_texop_texture_samples:
2676 case nir_texop_samples_identical:
2677 case nir_texop_txf_ms_mcs:
2678 compile_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
2679 return;
2680 }
2681
2682 tex_info(tex, &flags, &coords);
2683
2684 /*
2685 * lay out the first argument in the proper order:
2686 * - actual coordinates first
2687 * - shadow reference
2688 * - array index
2689 * - projection w
2690 * - starting at offset 4, dpdx.xy, dpdy.xy
2691 *
2692 * bias/lod go into the second arg
2693 */
2694
2695 /* insert tex coords: */
2696 for (i = 0; i < coords; i++)
2697 src0[i] = coord[i];
2698
2699 nsrc0 = i;
2700
2701 /* NOTE a3xx (and possibly a4xx?) might be different, using isaml
2702 * with scaled x coord according to requested sample:
2703 */
2704 if (tex->op == nir_texop_txf_ms) {
2705 if (ctx->compiler->txf_ms_with_isaml) {
2706 /* the samples are laid out in x dimension as
2707 * 0 1 2 3
2708 * x_ms = (x << ms) + sample_index;
2709 */
2710 struct ir3_instruction *ms;
2711 ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);
2712
2713 src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
2714 src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);
2715
2716 opc = OPC_ISAML;
2717 } else {
2718 src0[nsrc0++] = sample_index;
2719 }
2720 }
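/* Worked example for the isaml path above (the sample layout is an
 * assumption): with a 4x MSAA surface ms == 2, so fetching sample 3 of
 * texel x == 5 becomes x_ms = (5 << 2) + 3 = 23.
 */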
2721
2722 /* scale up integer coords for TXF based on the LOD */
2723 if (ctx->compiler->unminify_coords && (opc == OPC_ISAML)) {
2724 assert(has_lod);
2725 for (i = 0; i < coords; i++)
2726 src0[i] = ir3_SHL_B(b, src0[i], 0, lod, 0);
2727 }
2728
2729 if (coords == 1) {
2730 /* hw doesn't do 1d, so we treat it as 2d with
2731 * height of 1, and patch up the y coord.
2732 * TODO: y coord should be (int)0 in some cases..
2733 */
2734 src0[nsrc0++] = create_immed(b, fui(0.5));
2735 }
2736
2737 if (tex->is_shadow && tex->op != nir_texop_lod)
2738 src0[nsrc0++] = compare;
2739
2740 if (tex->is_array && tex->op != nir_texop_lod) {
2741 struct ir3_instruction *idx = coord[coords];
2742
2743 /* the array coord for cube arrays needs 0.5 added to it */
2744 if (ctx->compiler->array_index_add_half && (opc != OPC_ISAML))
2745 idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);
2746
2747 src0[nsrc0++] = idx;
2748 }
2749
2750 if (has_proj) {
2751 src0[nsrc0++] = proj;
2752 flags |= IR3_INSTR_P;
2753 }
2754
2755 /* pad to 4, then ddx/ddy: */
2756 if (tex->op == nir_texop_txd) {
2757 while (nsrc0 < 4)
2758 src0[nsrc0++] = create_immed(b, fui(0.0));
2759 for (i = 0; i < coords; i++)
2760 src0[nsrc0++] = ddx[i];
2761 if (coords < 2)
2762 src0[nsrc0++] = create_immed(b, fui(0.0));
2763 for (i = 0; i < coords; i++)
2764 src0[nsrc0++] = ddy[i];
2765 if (coords < 2)
2766 src0[nsrc0++] = create_immed(b, fui(0.0));
2767 }
2768
2769 /*
2770 * second argument (if applicable):
2771 * - offsets
2772 * - lod
2773 * - bias
2774 */
2775 if (has_off | has_lod | has_bias) {
2776 if (has_off) {
2777 unsigned off_coords = coords;
2778 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
2779 off_coords--;
2780 for (i = 0; i < off_coords; i++)
2781 src1[nsrc1++] = off[i];
2782 if (off_coords < 2)
2783 src1[nsrc1++] = create_immed(b, fui(0.0));
2784 flags |= IR3_INSTR_O;
2785 }
2786
2787 if (has_lod | has_bias)
2788 src1[nsrc1++] = lod;
2789 }
2790
2791 switch (tex->dest_type) {
2792 case nir_type_invalid:
2793 case nir_type_float:
2794 type = TYPE_F32;
2795 break;
2796 case nir_type_int:
2797 type = TYPE_S32;
2798 break;
2799 case nir_type_uint:
2800 case nir_type_bool:
2801 type = TYPE_U32;
2802 break;
2803 default:
2804 unreachable("bad dest_type");
2805 }
2806
2807 if (opc == OPC_GETLOD)
2808 type = TYPE_U32;
2809
2810 unsigned tex_idx = tex->texture_index;
2811
2812 ctx->max_texture_index = MAX2(ctx->max_texture_index, tex_idx);
2813
2814 struct ir3_instruction *col0 = create_collect(ctx, src0, nsrc0);
2815 struct ir3_instruction *col1 = create_collect(ctx, src1, nsrc1);
2816
2817 sam = ir3_SAM(b, opc, type, 0b1111, flags,
2818 tex_idx, tex_idx, col0, col1);
2819
2820 if ((ctx->astc_srgb & (1 << tex_idx)) && !nir_tex_instr_is_query(tex)) {
2821 /* only need first 3 components: */
2822 sam->regs[0]->wrmask = 0x7;
2823 split_dest(b, dst, sam, 0, 3);
2824
2825 /* we need to sample the alpha separately with a non-ASTC
2826 * texture state:
2827 */
2828 sam = ir3_SAM(b, opc, type, 0b1000, flags,
2829 tex_idx, tex_idx, col0, col1);
2830
2831 array_insert(ctx->ir, ctx->ir->astc_srgb, sam);
2832
2833 /* fixup .w component: */
2834 split_dest(b, &dst[3], sam, 3, 1);
2835 } else {
2836 /* normal (non-workaround) case: */
2837 split_dest(b, dst, sam, 0, 4);
2838 }
2839
2840 /* GETLOD returns results in 4.8 fixed point */
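/* (i.e. 8 fractional bits, so a raw value of 0x180 would mean a lod of
 * 384 / 256 = 1.5, hence the multiply by 1.0/256 below.)
 */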
2841 if (opc == OPC_GETLOD) {
2842 struct ir3_instruction *factor = create_immed(b, fui(1.0 / 256));
2843
2844 compile_assert(ctx, tex->dest_type == nir_type_float);
2845 for (i = 0; i < 2; i++) {
2846 dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_U32, TYPE_F32), 0,
2847 factor, 0);
2848 }
2849 }
2850
2851 put_dst(ctx, &tex->dest);
2852 }
2853
2854 static void
2855 emit_tex_query_levels(struct ir3_context *ctx, nir_tex_instr *tex)
2856 {
2857 struct ir3_block *b = ctx->block;
2858 struct ir3_instruction **dst, *sam;
2859
2860 dst = get_dst(ctx, &tex->dest, 1);
2861
2862 sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 0b0100, 0,
2863 tex->texture_index, tex->texture_index, NULL, NULL);
2864
2865 /* even though there is only one component, since it ends
2866 * up in .z rather than .x, we need a split_dest()
2867 */
2868 split_dest(b, dst, sam, 0, 3);
2869
2870 /* The # of levels comes from getinfo.z. We need to add 1 to it, since
2871 * the value in TEX_CONST_0 is zero-based.
2872 */
2873 if (ctx->compiler->levels_add_one)
2874 dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);
2875
2876 put_dst(ctx, &tex->dest);
2877 }
2878
2879 static void
2880 emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
2881 {
2882 struct ir3_block *b = ctx->block;
2883 struct ir3_instruction **dst, *sam;
2884 struct ir3_instruction *lod;
2885 unsigned flags, coords;
2886
2887 tex_info(tex, &flags, &coords);
2888
2889 /* Actually we want the number of dimensions, not coordinates. This
2890 * distinction only matters for cubes.
2891 */
2892 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
2893 coords = 2;
2894
2895 dst = get_dst(ctx, &tex->dest, 4);
2896
2897 compile_assert(ctx, tex->num_srcs == 1);
2898 compile_assert(ctx, tex->src[0].src_type == nir_tex_src_lod);
2899
2900 lod = get_src(ctx, &tex->src[0].src)[0];
2901
2902 sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
2903 tex->texture_index, tex->texture_index, lod, NULL);
2904
2905 split_dest(b, dst, sam, 0, 4);
2906
2907 /* Array size actually ends up in .w rather than .z. This doesn't
2908 * matter for miplevel 0, but for higher mips the value in z is
2909 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
2910 * returned, which means that we have to add 1 to it for arrays.
2911 */
2912 if (tex->is_array) {
2913 if (ctx->compiler->levels_add_one) {
2914 dst[coords] = ir3_ADD_U(b, dst[3], 0, create_immed(b, 1), 0);
2915 } else {
2916 dst[coords] = ir3_MOV(b, dst[3], TYPE_U32);
2917 }
2918 }
2919
2920 put_dst(ctx, &tex->dest);
2921 }
2922
2923 static void
2924 emit_jump(struct ir3_context *ctx, nir_jump_instr *jump)
2925 {
2926 switch (jump->type) {
2927 case nir_jump_break:
2928 case nir_jump_continue:
2929 case nir_jump_return:
2930 * I *think* we can simply ignore this, and use the
2931 * successor block link to figure out where we need to
2932 * jump to for break/continue
2933 */
2934 break;
2935 default:
2936 compile_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
2937 break;
2938 }
2939 }
2940
2941 static void
2942 emit_instr(struct ir3_context *ctx, nir_instr *instr)
2943 {
2944 switch (instr->type) {
2945 case nir_instr_type_alu:
2946 emit_alu(ctx, nir_instr_as_alu(instr));
2947 break;
2948 case nir_instr_type_deref:
2949 /* ignored, handled as part of the intrinsic they are src to */
2950 break;
2951 case nir_instr_type_intrinsic:
2952 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2953 break;
2954 case nir_instr_type_load_const:
2955 emit_load_const(ctx, nir_instr_as_load_const(instr));
2956 break;
2957 case nir_instr_type_ssa_undef:
2958 emit_undef(ctx, nir_instr_as_ssa_undef(instr));
2959 break;
2960 case nir_instr_type_tex: {
2961 nir_tex_instr *tex = nir_instr_as_tex(instr);
2962 /* a couple of tex instructions get special-cased:
2963 */
2964 switch (tex->op) {
2965 case nir_texop_txs:
2966 emit_tex_txs(ctx, tex);
2967 break;
2968 case nir_texop_query_levels:
2969 emit_tex_query_levels(ctx, tex);
2970 break;
2971 default:
2972 emit_tex(ctx, tex);
2973 break;
2974 }
2975 break;
2976 }
2977 case nir_instr_type_jump:
2978 emit_jump(ctx, nir_instr_as_jump(instr));
2979 break;
2980 case nir_instr_type_phi:
2981 /* we have converted phi webs to regs in NIR by now */
2982 compile_error(ctx, "Unexpected NIR instruction type: %d\n", instr->type);
2983 break;
2984 case nir_instr_type_call:
2985 case nir_instr_type_parallel_copy:
2986 compile_error(ctx, "Unhandled NIR instruction type: %d\n", instr->type);
2987 break;
2988 }
2989 }
2990
2991 static struct ir3_block *
2992 get_block(struct ir3_context *ctx, const nir_block *nblock)
2993 {
2994 struct ir3_block *block;
2995 struct hash_entry *hentry;
2996 unsigned i;
2997
2998 hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
2999 if (hentry)
3000 return hentry->data;
3001
3002 block = ir3_block_create(ctx->ir);
3003 block->nblock = nblock;
3004 _mesa_hash_table_insert(ctx->block_ht, nblock, block);
3005
3006 block->predecessors_count = nblock->predecessors->entries;
3007 block->predecessors = ralloc_array_size(block,
3008 sizeof(block->predecessors[0]), block->predecessors_count);
3009 i = 0;
3010 set_foreach(nblock->predecessors, sentry) {
3011 block->predecessors[i++] = get_block(ctx, sentry->key);
3012 }
3013
3014 return block;
3015 }
3016
3017 static void
3018 emit_block(struct ir3_context *ctx, nir_block *nblock)
3019 {
3020 struct ir3_block *block = get_block(ctx, nblock);
3021
3022 for (int i = 0; i < ARRAY_SIZE(block->successors); i++) {
3023 if (nblock->successors[i]) {
3024 block->successors[i] =
3025 get_block(ctx, nblock->successors[i]);
3026 }
3027 }
3028
3029 ctx->block = block;
3030 list_addtail(&block->node, &ctx->ir->block_list);
3031
3032 /* re-emit addr register in each block if needed: */
3033 for (int i = 0; i < ARRAY_SIZE(ctx->addr_ht); i++) {
3034 _mesa_hash_table_destroy(ctx->addr_ht[i], NULL);
3035 ctx->addr_ht[i] = NULL;
3036 }
3037
3038 nir_foreach_instr(instr, nblock) {
3039 ctx->cur_instr = instr;
3040 emit_instr(ctx, instr);
3041 ctx->cur_instr = NULL;
3042 if (ctx->error)
3043 return;
3044 }
3045 }
3046
3047 static void emit_cf_list(struct ir3_context *ctx, struct exec_list *list);
3048
3049 static void
3050 emit_if(struct ir3_context *ctx, nir_if *nif)
3051 {
3052 struct ir3_instruction *condition = get_src(ctx, &nif->condition)[0];
3053
3054 ctx->block->condition =
3055 get_predicate(ctx, ir3_b2n(condition->block, condition));
3056
3057 emit_cf_list(ctx, &nif->then_list);
3058 emit_cf_list(ctx, &nif->else_list);
3059 }
3060
3061 static void
3062 emit_loop(struct ir3_context *ctx, nir_loop *nloop)
3063 {
3064 emit_cf_list(ctx, &nloop->body);
3065 }
3066
3067 static void
3068 emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
3069 {
3070 foreach_list_typed(nir_cf_node, node, node, list) {
3071 switch (node->type) {
3072 case nir_cf_node_block:
3073 emit_block(ctx, nir_cf_node_as_block(node));
3074 break;
3075 case nir_cf_node_if:
3076 emit_if(ctx, nir_cf_node_as_if(node));
3077 break;
3078 case nir_cf_node_loop:
3079 emit_loop(ctx, nir_cf_node_as_loop(node));
3080 break;
3081 case nir_cf_node_function:
3082 compile_error(ctx, "TODO\n");
3083 break;
3084 }
3085 }
3086 }
3087
3088 /* emit stream-out code. At this point, the current block is the original
3089 * (nir) end block, and nir ensures that all flow control paths terminate
3090 * into the end block. We re-purpose the original end block to generate
3091 * the 'if (vtxcnt < maxvtxcnt)' condition, then append the conditional
3092 * block holding stream-out write instructions, followed by the new end
3093 * block:
3094 *
3095 * blockOrigEnd {
3096 * p0.x = (vtxcnt < maxvtxcnt)
3097 * // succs: blockStreamOut, blockNewEnd
3098 * }
3099 * blockStreamOut {
3100 * ... stream-out instructions ...
3101 * // succs: blockNewEnd
3102 * }
3103 * blockNewEnd {
3104 * }
3105 */
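/* Rough sketch of the addressing used below (illustrative numbers): for
 * a stream-out buffer with a stride of 4 dwords, vertex vtxcnt starts at
 * base + vtxcnt * 4 * 4 bytes, and component j of an output with
 * dst_offset d lands (d + j) * 4 bytes into that record.
 */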
3106 static void
3107 emit_stream_out(struct ir3_context *ctx)
3108 {
3109 struct ir3_shader_variant *v = ctx->so;
3110 struct ir3 *ir = ctx->ir;
3111 struct ir3_stream_output_info *strmout =
3112 &ctx->so->shader->stream_output;
3113 struct ir3_block *orig_end_block, *stream_out_block, *new_end_block;
3114 struct ir3_instruction *vtxcnt, *maxvtxcnt, *cond;
3115 struct ir3_instruction *bases[IR3_MAX_SO_BUFFERS];
3116
3117 /* create vtxcnt input in input block at top of shader,
3118 * so that it is seen as live over the entire duration
3119 * of the shader:
3120 */
3121 vtxcnt = create_input(ctx, 0);
3122 add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt);
3123
3124 maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX);
3125
3126 /* at this point, we are at the original 'end' block,
3127 * re-purpose this block to stream-out condition, then
3128 * append stream-out block and new-end block
3129 */
3130 orig_end_block = ctx->block;
3131
3132 // TODO these blocks need to update predecessors..
3133 // maybe w/ store_global intrinsic, we could do this
3134 // stuff in nir->nir pass
3135
3136 stream_out_block = ir3_block_create(ir);
3137 list_addtail(&stream_out_block->node, &ir->block_list);
3138
3139 new_end_block = ir3_block_create(ir);
3140 list_addtail(&new_end_block->node, &ir->block_list);
3141
3142 orig_end_block->successors[0] = stream_out_block;
3143 orig_end_block->successors[1] = new_end_block;
3144 stream_out_block->successors[0] = new_end_block;
3145
3146 /* setup 'if (vtxcnt < maxvtxcnt)' condition: */
3147 cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0);
3148 cond->regs[0]->num = regid(REG_P0, 0);
3149 cond->cat2.condition = IR3_COND_LT;
3150
3151 /* condition goes on previous block to the conditional,
3152 * since it is used to pick which of the two successor
3153 * paths to take:
3154 */
3155 orig_end_block->condition = cond;
3156
3157 /* switch to stream_out_block to generate the stream-out
3158 * instructions:
3159 */
3160 ctx->block = stream_out_block;
3161
3162 /* Calculate base addresses based on vtxcnt. Instructions
3163 * generated for bases not used in the following loop will be
3164 * stripped out in the backend.
3165 */
3166 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
3167 unsigned stride = strmout->stride[i];
3168 struct ir3_instruction *base, *off;
3169
3170 base = create_uniform(ctx, regid(v->constbase.tfbo, i));
3171
3172 /* 24-bit should be enough: */
3173 off = ir3_MUL_U(ctx->block, vtxcnt, 0,
3174 create_immed(ctx->block, stride * 4), 0);
3175
3176 bases[i] = ir3_ADD_S(ctx->block, off, 0, base, 0);
3177 }
3178
3179 /* Generate the per-output store instructions: */
3180 for (unsigned i = 0; i < strmout->num_outputs; i++) {
3181 for (unsigned j = 0; j < strmout->output[i].num_components; j++) {
3182 unsigned c = j + strmout->output[i].start_component;
3183 struct ir3_instruction *base, *out, *stg;
3184
3185 base = bases[strmout->output[i].output_buffer];
3186 out = ctx->ir->outputs[regid(strmout->output[i].register_index, c)];
3187
3188 stg = ir3_STG(ctx->block, base, 0, out, 0,
3189 create_immed(ctx->block, 1), 0);
3190 stg->cat6.type = TYPE_U32;
3191 stg->cat6.dst_offset = (strmout->output[i].dst_offset + j) * 4;
3192
3193 array_insert(ctx->block, ctx->block->keeps, stg);
3194 }
3195 }
3196
3197 /* and finally switch to the new_end_block: */
3198 ctx->block = new_end_block;
3199 }
3200
3201 static void
3202 emit_function(struct ir3_context *ctx, nir_function_impl *impl)
3203 {
3204 nir_metadata_require(impl, nir_metadata_block_index);
3205
3206 emit_cf_list(ctx, &impl->body);
3207 emit_block(ctx, impl->end_block);
3208
3209 /* at this point, we should have a single empty block,
3210 * into which we emit the 'end' instruction.
3211 */
3212 compile_assert(ctx, list_empty(&ctx->block->instr_list));
3213
3214 /* If stream-out (aka transform-feedback) enabled, emit the
3215 * stream-out instructions, followed by a new empty block (into
3216 * which the 'end' instruction lands).
3217 *
3218 * NOTE: it is done in this order, rather than inserting before
3219 * we emit end_block, because NIR guarantees that all blocks
3220 * flow into end_block, and that end_block has no successors.
3221 * So by re-purposing end_block as the first block of stream-
3222 * out, we guarantee that all exit paths flow into the stream-
3223 * out instructions.
3224 */
3225 if ((ctx->compiler->gpu_id < 500) &&
3226 (ctx->so->shader->stream_output.num_outputs > 0) &&
3227 !ctx->so->binning_pass) {
3228 debug_assert(ctx->so->type == MESA_SHADER_VERTEX);
3229 emit_stream_out(ctx);
3230 }
3231
3232 ir3_END(ctx->block);
3233 }
3234
3235 static struct ir3_instruction *
3236 create_frag_coord(struct ir3_context *ctx, unsigned comp)
3237 {
3238 struct ir3_block *block = ctx->block;
3239 struct ir3_instruction *instr;
3240
3241 if (!ctx->frag_coord) {
3242 ctx->frag_coord = create_input_compmask(ctx, 0, 0xf);
3243 /* defer add_sysval_input() until after all inputs created */
3244 }
3245
3246 split_dest(block, &instr, ctx->frag_coord, comp, 1);
3247
3248 switch (comp) {
3249 case 0: /* .x */
3250 case 1: /* .y */
3251 /* for frag_coord, we get unsigned values.. we need
3252 * to subtract (integer) 8 and divide by 16 (right-
3253 * shift by 4) then convert to float:
3254 *
3255 * sub.s tmp, src, 8
3256 * shr.b tmp, tmp, 4
3257 * mov.u32f32 dst, tmp
3258 *
3259 */
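/* (So, presumably, the hw hands us x in 1/16 pixel units with a
 * half-pixel bias: pixel x == 5 would arrive as 5*16 + 8 == 88, and
 * (88 - 8) >> 4 recovers 5 before the float conversion.)
 */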
3260 instr = ir3_SUB_S(block, instr, 0,
3261 create_immed(block, 8), 0);
3262 instr = ir3_SHR_B(block, instr, 0,
3263 create_immed(block, 4), 0);
3264 instr = ir3_COV(block, instr, TYPE_U32, TYPE_F32);
3265
3266 return instr;
3267 case 2: /* .z */
3268 case 3: /* .w */
3269 default:
3270 /* seems that we can use these as-is: */
3271 return instr;
3272 }
3273 }
3274
3275 static void
3276 setup_input(struct ir3_context *ctx, nir_variable *in)
3277 {
3278 struct ir3_shader_variant *so = ctx->so;
3279 unsigned ncomp = glsl_get_components(in->type);
3280 unsigned n = in->data.driver_location;
3281 unsigned slot = in->data.location;
3282
3283 /* let's pretend things other than vec4 don't exist: */
3284 ncomp = MAX2(ncomp, 4);
3285
3286 /* skip unread inputs; we could end up with (for example) unsplit
3287 * matrix/etc inputs in the case they are not read, so just silently
3288 * skip these.
3289 */
3290 if (ncomp > 4)
3291 return;
3292
3293 compile_assert(ctx, ncomp == 4);
3294
3295 so->inputs[n].slot = slot;
3296 so->inputs[n].compmask = (1 << ncomp) - 1;
3297 so->inputs_count = MAX2(so->inputs_count, n + 1);
3298 so->inputs[n].interpolate = in->data.interpolation;
3299
3300 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
3301 for (int i = 0; i < ncomp; i++) {
3302 struct ir3_instruction *instr = NULL;
3303 unsigned idx = (n * 4) + i;
3304
3305 if (slot == VARYING_SLOT_POS) {
3306 so->inputs[n].bary = false;
3307 so->frag_coord = true;
3308 instr = create_frag_coord(ctx, i);
3309 } else if (slot == VARYING_SLOT_PNTC) {
3310 /* see for example st_nir_fixup_varying_slots().. this is
3311 * maybe a bit mesa/st specific. But we need things to line
3312 * up for this in fdN_program:
3313 * unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0);
3314 * if (emit->sprite_coord_enable & texmask) {
3315 * ...
3316 * }
3317 */
3318 so->inputs[n].slot = VARYING_SLOT_VAR8;
3319 so->inputs[n].bary = true;
3320 instr = create_frag_input(ctx, false);
3321 } else {
3322 bool use_ldlv = false;
3323
3324 /* detect the special case for front/back colors where
3325 * we need to do flat vs smooth shading depending on
3326 * rast state:
3327 */
3328 if (in->data.interpolation == INTERP_MODE_NONE) {
3329 switch (slot) {
3330 case VARYING_SLOT_COL0:
3331 case VARYING_SLOT_COL1:
3332 case VARYING_SLOT_BFC0:
3333 case VARYING_SLOT_BFC1:
3334 so->inputs[n].rasterflat = true;
3335 break;
3336 default:
3337 break;
3338 }
3339 }
3340
3341 if (ctx->compiler->flat_bypass) {
3342 if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) ||
3343 (so->inputs[n].rasterflat && ctx->so->key.rasterflat))
3344 use_ldlv = true;
3345 }
3346
3347 so->inputs[n].bary = true;
3348
3349 instr = create_frag_input(ctx, use_ldlv);
3350 }
3351
3352 compile_assert(ctx, idx < ctx->ir->ninputs);
3353
3354 ctx->ir->inputs[idx] = instr;
3355 }
3356 } else if (ctx->so->type == MESA_SHADER_VERTEX) {
3357 for (int i = 0; i < ncomp; i++) {
3358 unsigned idx = (n * 4) + i;
3359 compile_assert(ctx, idx < ctx->ir->ninputs);
3360 ctx->ir->inputs[idx] = create_input(ctx, idx);
3361 }
3362 } else {
3363 compile_error(ctx, "unknown shader type: %d\n", ctx->so->type);
3364 }
3365
3366 if (so->inputs[n].bary || (ctx->so->type == MESA_SHADER_VERTEX)) {
3367 so->total_in += ncomp;
3368 }
3369 }
3370
3371 static void
3372 setup_output(struct ir3_context *ctx, nir_variable *out)
3373 {
3374 struct ir3_shader_variant *so = ctx->so;
3375 unsigned ncomp = glsl_get_components(out->type);
3376 unsigned n = out->data.driver_location;
3377 unsigned slot = out->data.location;
3378 unsigned comp = 0;
3379
3380 /* let's pretend things other than vec4 don't exist: */
3381 ncomp = MAX2(ncomp, 4);
3382 compile_assert(ctx, ncomp == 4);
3383
3384 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
3385 switch (slot) {
3386 case FRAG_RESULT_DEPTH:
3387 comp = 2; /* tgsi will write to .z component */
3388 so->writes_pos = true;
3389 break;
3390 case FRAG_RESULT_COLOR:
3391 so->color0_mrt = 1;
3392 break;
3393 default:
3394 if (slot >= FRAG_RESULT_DATA0)
3395 break;
3396 compile_error(ctx, "unknown FS output name: %s\n",
3397 gl_frag_result_name(slot));
3398 }
3399 } else if (ctx->so->type == MESA_SHADER_VERTEX) {
3400 switch (slot) {
3401 case VARYING_SLOT_POS:
3402 so->writes_pos = true;
3403 break;
3404 case VARYING_SLOT_PSIZ:
3405 so->writes_psize = true;
3406 break;
3407 case VARYING_SLOT_COL0:
3408 case VARYING_SLOT_COL1:
3409 case VARYING_SLOT_BFC0:
3410 case VARYING_SLOT_BFC1:
3411 case VARYING_SLOT_FOGC:
3412 case VARYING_SLOT_CLIP_DIST0:
3413 case VARYING_SLOT_CLIP_DIST1:
3414 case VARYING_SLOT_CLIP_VERTEX:
3415 break;
3416 default:
3417 if (slot >= VARYING_SLOT_VAR0)
3418 break;
3419 if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
3420 break;
3421 compile_error(ctx, "unknown VS output name: %s\n",
3422 gl_varying_slot_name(slot));
3423 }
3424 } else {
3425 compile_error(ctx, "unknown shader type: %d\n", ctx->so->type);
3426 }
3427
3428 compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
3429
3430 so->outputs[n].slot = slot;
3431 so->outputs[n].regid = regid(n, comp);
3432 so->outputs_count = MAX2(so->outputs_count, n + 1);
3433
3434 for (int i = 0; i < ncomp; i++) {
3435 unsigned idx = (n * 4) + i;
3436 compile_assert(ctx, idx < ctx->ir->noutputs);
3437 ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
3438 }
3439 }
3440
3441 static int
3442 max_drvloc(struct exec_list *vars)
3443 {
3444 int drvloc = -1;
3445 nir_foreach_variable(var, vars) {
3446 drvloc = MAX2(drvloc, (int)var->data.driver_location);
3447 }
3448 return drvloc;
3449 }
3450
3451 static const unsigned max_sysvals[] = {
3452 [MESA_SHADER_FRAGMENT] = 24, // TODO
3453 [MESA_SHADER_VERTEX] = 16,
3454 [MESA_SHADER_COMPUTE] = 16, // TODO how many do we actually need?
3455 };
3456
3457 static void
3458 emit_instructions(struct ir3_context *ctx)
3459 {
3460 unsigned ninputs, noutputs;
3461 nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);
3462
3463 ninputs = (max_drvloc(&ctx->s->inputs) + 1) * 4;
3464 noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4;
3465
3466 /* we need to leave room for sysvals:
3467 */
3468 ninputs += max_sysvals[ctx->so->type];
3469
3470 ctx->ir = ir3_create(ctx->compiler, ninputs, noutputs);
3471
3472 /* Create inputs in first block: */
3473 ctx->block = get_block(ctx, nir_start_block(fxn));
3474 ctx->in_block = ctx->block;
3475 list_addtail(&ctx->block->node, &ctx->ir->block_list);
3476
3477 ninputs -= max_sysvals[ctx->so->type];
3478
3479 /* for fragment shader, the vcoord input register is used as the
3480 * base for bary.f varying fetch instrs:
3481 */
3482 struct ir3_instruction *vcoord = NULL;
3483 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
3484 struct ir3_instruction *xy[2];
3485
3486 vcoord = create_input_compmask(ctx, 0, 0x3);
3487 split_dest(ctx->block, xy, vcoord, 0, 2);
3488
3489 ctx->frag_vcoord = create_collect(ctx, xy, 2);
3490 }
3491
3492 /* Setup inputs: */
3493 nir_foreach_variable(var, &ctx->s->inputs) {
3494 setup_input(ctx, var);
3495 }
3496
3497 /* Defer add_sysval_input() stuff until after setup_inputs(),
3498 * because sysvals need to be appended after varyings:
3499 */
3500 if (vcoord) {
3501 add_sysval_input_compmask(ctx, SYSTEM_VALUE_VARYING_COORD,
3502 0x3, vcoord);
3503 }
3504
3505 if (ctx->frag_coord) {
3506 add_sysval_input_compmask(ctx, SYSTEM_VALUE_FRAG_COORD,
3507 0xf, ctx->frag_coord);
3508 }
3509
3510 /* Setup outputs: */
3511 nir_foreach_variable(var, &ctx->s->outputs) {
3512 setup_output(ctx, var);
3513 }
3514
3515 /* Setup registers (which should only be arrays): */
3516 nir_foreach_register(reg, &ctx->s->registers) {
3517 declare_array(ctx, reg);
3518 }
3519
3520 /* NOTE: need to do something more clever when we support >1 fxn */
3521 nir_foreach_register(reg, &fxn->registers) {
3522 declare_array(ctx, reg);
3523 }
3524 /* And emit the body: */
3525 ctx->impl = fxn;
3526 emit_function(ctx, fxn);
3527 }
3528
3529 /* from NIR perspective, we actually have varying inputs. But the varying
3530 * inputs, from an IR standpoint, are just bary.f/ldlv instructions. The
3531 * only actual inputs are the sysvals.
3532 */
3533 static void
3534 fixup_frag_inputs(struct ir3_context *ctx)
3535 {
3536 struct ir3_shader_variant *so = ctx->so;
3537 struct ir3 *ir = ctx->ir;
3538 unsigned i = 0;
3539
3540 /* sysvals should appear at the end of the inputs, drop everything else: */
3541 while ((i < so->inputs_count) && !so->inputs[i].sysval)
3542 i++;
3543
3544 /* at IR level, inputs are always blocks of 4 scalars: */
3545 i *= 4;
3546
3547 ir->inputs = &ir->inputs[i];
3548 ir->ninputs -= i;
3549 }
3550
3551 /* Fixup tex sampler state for astc/srgb workaround instructions. We
3552 * need to assign the tex state indexes for these after we know the
3553 * max tex index.
3554 */
3555 static void
3556 fixup_astc_srgb(struct ir3_context *ctx)
3557 {
3558 struct ir3_shader_variant *so = ctx->so;
3559 /* indexed by original tex idx, value is newly assigned alpha sampler
3560 * state tex idx. Zero is invalid since there is at least one sampler
3561 * if we get here.
3562 */
3563 unsigned alt_tex_state[16] = {0};
3564 unsigned tex_idx = ctx->max_texture_index + 1;
3565 unsigned idx = 0;
3566
3567 so->astc_srgb.base = tex_idx;
3568
3569 for (unsigned i = 0; i < ctx->ir->astc_srgb_count; i++) {
3570 struct ir3_instruction *sam = ctx->ir->astc_srgb[i];
3571
3572 compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));
3573
3574 if (alt_tex_state[sam->cat5.tex] == 0) {
3575 /* assign new alternate/alpha tex state slot: */
3576 alt_tex_state[sam->cat5.tex] = tex_idx++;
3577 so->astc_srgb.orig_idx[idx++] = sam->cat5.tex;
3578 so->astc_srgb.count++;
3579 }
3580
3581 sam->cat5.tex = alt_tex_state[sam->cat5.tex];
3582 }
3583 }
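/* Example (illustrative): if max_texture_index was 3, astc_srgb.base
 * becomes 4; the first workaround sam that referenced tex 1 is moved to
 * tex 4, the next distinct original index to tex 5, and so on, with
 * orig_idx[] remembering the original slots for the driver.
 */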
3584
3585 static void
3586 fixup_binning_pass(struct ir3_context *ctx)
3587 {
3588 struct ir3_shader_variant *so = ctx->so;
3589 struct ir3 *ir = ctx->ir;
3590 unsigned i, j;
3591
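	/* compact the outputs array in place, dropping everything the
	 * binning pass does not need:
	 */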
	for (i = 0, j = 0; i < so->outputs_count; i++) {
		unsigned slot = so->outputs[i].slot;

		/* throw away everything but first position/psize */
		if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
			if (i != j) {
				so->outputs[j] = so->outputs[i];
				ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0];
				ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1];
				ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2];
				ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3];
			}
			j++;
		}
	}
	so->outputs_count = j;
	ir->noutputs = j * 4;
}

int
ir3_compile_shader_nir(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so)
{
	struct ir3_context *ctx;
	struct ir3 *ir;
	struct ir3_instruction **inputs;
	unsigned i, actual_in, inloc;
	int ret = 0, max_bary;

	assert(!so->ir);

	ctx = compile_init(compiler, so);
	if (!ctx) {
		DBG("INIT failed!");
		ret = -1;
		goto out;
	}

	emit_instructions(ctx);

	if (ctx->error) {
		DBG("EMIT failed!");
		ret = -1;
		goto out;
	}

	ir = so->ir = ctx->ir;

	/* keep track of the inputs from the TGSI perspective.. */
	inputs = ir->inputs;

	/* but fixup actual inputs for frag shader: */
	if (so->type == MESA_SHADER_FRAGMENT)
		fixup_frag_inputs(ctx);

	/* at this point, for binning pass, throw away unneeded outputs: */
	if (so->binning_pass && (ctx->compiler->gpu_id < 600))
		fixup_binning_pass(ctx);

	/* if we want half-precision outputs, mark the output registers
	 * as half:
	 */
	if (so->key.half_precision) {
		for (i = 0; i < ir->noutputs; i++) {
			struct ir3_instruction *out = ir->outputs[i];

			if (!out)
				continue;

			/* if frag shader writes z, that needs to be full precision: */
			if (so->outputs[i/4].slot == FRAG_RESULT_DEPTH)
				continue;

			out->regs[0]->flags |= IR3_REG_HALF;
			/* output could be a fanout (ie. texture fetch output)
			 * in which case we need to propagate the half-reg flag
			 * up to the definer so that RA sees it:
			 */
			if (out->opc == OPC_META_FO) {
				out = out->regs[1]->instr;
				out->regs[0]->flags |= IR3_REG_HALF;
			}

			if (out->opc == OPC_MOV) {
				out->cat1.dst_type = half_type(out->cat1.dst_type);
			}
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("BEFORE CP:\n");
		ir3_print(ir);
	}

	ir3_cp(ir, so);

	/* At this point, for the binning pass, throw away unneeded outputs.
	 * Note that for a6xx and later, we do this after ir3_cp to ensure
	 * that the uniform/constant layout for BS and VS matches, so that
	 * we can re-use the same VS_CONST state group.
	 */
	if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
		fixup_binning_pass(ctx);

	/* Insert a mov if the same instruction feeds more than one output,
	 * so that each output slot ends up with a distinct instruction.
	 * eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
	 */
	for (int i = ir->noutputs - 1; i >= 0; i--) {
		if (!ir->outputs[i])
			continue;
		for (unsigned j = 0; j < i; j++) {
			if (ir->outputs[i] == ir->outputs[j]) {
				ir->outputs[i] =
					ir3_MOV(ir->outputs[i]->block, ir->outputs[i], TYPE_F32);
			}
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("BEFORE GROUPING:\n");
		ir3_print(ir);
	}

	ir3_sched_add_deps(ir);

	/* Group left/right neighbors, inserting mov's where needed to
	 * solve conflicts:
	 */
	ir3_group(ir);

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER GROUPING:\n");
		ir3_print(ir);
	}

	ir3_depth(ir);

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER DEPTH:\n");
		ir3_print(ir);
	}

	ret = ir3_sched(ir);
	if (ret) {
		DBG("SCHED failed!");
		goto out;
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER SCHED:\n");
		ir3_print(ir);
	}

	ret = ir3_ra(ir, so->type, so->frag_coord, so->frag_face);
	if (ret) {
		DBG("RA failed!");
		goto out;
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER RA:\n");
		ir3_print(ir);
	}

	/* fixup input/outputs: */
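	/* (pull the hw register assigned by RA for each output; outputs are
	 * blocks of 4 scalars, so the base regid comes from the x component)
	 */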
	for (i = 0; i < so->outputs_count; i++) {
		so->outputs[i].regid = ir->outputs[i*4]->regs[0]->num;
	}

	/* Note that some or all channels of an input may be unused: */
	actual_in = 0;
	inloc = 0;
	for (i = 0; i < so->inputs_count; i++) {
		unsigned j, reg = regid(63,0), compmask = 0, maxcomp = 0;
		so->inputs[i].ncomp = 0;
		so->inputs[i].inloc = inloc;
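		/* scan the four scalar slots of this input to build the
		 * component mask and find the base register:
		 */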
		for (j = 0; j < 4; j++) {
			struct ir3_instruction *in = inputs[(i*4) + j];
			if (in && !(in->flags & IR3_INSTR_UNUSED)) {
				compmask |= (1 << j);
				reg = in->regs[0]->num - j;
				actual_in++;
				so->inputs[i].ncomp++;
				if ((so->type == MESA_SHADER_FRAGMENT) && so->inputs[i].bary) {
					/* assign inloc: */
					assert(in->regs[1]->flags & IR3_REG_IMMED);
					in->regs[1]->iim_val = inloc + j;
					maxcomp = j + 1;
				}
			}
		}
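		/* fragment shader varyings consume contiguous inloc slots;
		 * other non-sysval inputs just record which components are used:
		 */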
		if ((so->type == MESA_SHADER_FRAGMENT) && compmask && so->inputs[i].bary) {
			so->varying_in++;
			so->inputs[i].compmask = (1 << maxcomp) - 1;
			inloc += maxcomp;
		} else if (!so->inputs[i].sysval) {
			so->inputs[i].compmask = compmask;
		}
		so->inputs[i].regid = reg;
	}

	if (ctx->astc_srgb)
		fixup_astc_srgb(ctx);

	/* We need to do legalize after the fragment shader's bary.f
	 * offsets (inloc) have been assigned above:
	 */
	ir3_legalize(ir, &so->num_samp, &so->has_ssbo, &max_bary);

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER LEGALIZE:\n");
		ir3_print(ir);
	}

	/* Note that actual_in counts inputs that are not bary.f'd for FS: */
	if (so->type == MESA_SHADER_VERTEX)
		so->total_in = actual_in;
	else
		so->total_in = max_bary + 1;

out:
	if (ret) {
		if (so->ir)
			ir3_destroy(so->ir);
		so->ir = NULL;
	}
	compile_free(ctx);

	return ret;
}