freedreno/ir3: find # of samplers from uniform vars
[mesa.git] / src / freedreno / ir3 / ir3_compiler_nir.c
/*
 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdarg.h>

#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_math.h"

#include "ir3_compiler.h"
#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

#include "instr-a3xx.h"
#include "ir3.h"
#include "ir3_context.h"


static struct ir3_instruction *
create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
		struct ir3_instruction *address, struct ir3_instruction *collect)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *src;

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	ir3_reg_create(mov, 0, 0);
	src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV);
	src->instr = collect;
	src->size = arrsz;
	src->array.offset = n;

	ir3_instr_set_address(mov, address);

	return mov;
}

static struct ir3_instruction *
create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask)
{
	struct ir3_instruction *in;

	in = ir3_instr_create(ctx->in_block, OPC_META_INPUT);
	in->inout.block = ctx->in_block;
	ir3_reg_create(in, n, 0);

	in->regs[0]->wrmask = compmask;

	return in;
}

static struct ir3_instruction *
create_input(struct ir3_context *ctx, unsigned n)
{
	return create_input_compmask(ctx, n, 0x1);
}

static struct ir3_instruction *
create_frag_input(struct ir3_context *ctx, bool use_ldlv)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *instr;
	/* actual inloc is assigned and fixed up later: */
	struct ir3_instruction *inloc = create_immed(block, 0);

	if (use_ldlv) {
		instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
		instr->cat6.type = TYPE_U32;
		instr->cat6.iim_val = 1;
	} else {
		instr = ir3_BARY_F(block, inloc, 0, ctx->frag_vcoord, 0);
		instr->regs[2]->wrmask = 0x3;
	}

	return instr;
}

static struct ir3_instruction *
create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
{
	/* first four vec4 sysvals are reserved for UBOs: */
	/* NOTE: dp is a scalar index, but there can be >4 dp components: */
	unsigned n = ctx->so->constbase.driver_param;
	unsigned r = regid(n + dp / 4, dp % 4);
	return create_uniform(ctx->block, r);
}

/*
 * Adreno uses uint rather than having a dedicated bool type,
 * which (potentially) requires some conversion, in particular
 * when using the output of a bool instr as an int input, or
 * vice versa.
 *
 *         | Adreno  |  NIR  |
 *  -------+---------+-------+-
 *   true  |    1    |  ~0   |
 *   false |    0    |   0   |
 *
 * To convert from an adreno bool (uint) to nir, use:
 *
 *   absneg.s dst, (neg)src
 *
 * To convert back in the other direction:
 *
 *   absneg.s dst, (abs)src
 *
 * The CP step can clean up the absneg.s that cancel each other
 * out, and with a slight bit of extra cleverness (to recognize
 * the instructions which produce either a 0 or 1) can eliminate
 * the absneg.s's completely when an instruction that wants
 * 0/1 consumes the result.  For example, when a nir 'bcsel'
 * consumes the result of 'feq'.  So we should be able to get by
 * without a boolean resolve step, and without incurring any
 * extra penalty in instruction count.
 */
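/* e.g. (a sketch of the mapping above, not additional hw behavior):
 * cmps.f produces the native 0/1 encoding, and the n2b absneg.s with
 * the (neg) modifier maps 1 -> -1, which is ~0 in two's complement,
 * i.e. exactly NIR's encoding of true.
 */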

/* NIR bool -> native (adreno): */
static struct ir3_instruction *
ir3_b2n(struct ir3_block *block, struct ir3_instruction *instr)
{
	return ir3_ABSNEG_S(block, instr, IR3_REG_SABS);
}

/* native (adreno) -> NIR bool: */
static struct ir3_instruction *
ir3_n2b(struct ir3_block *block, struct ir3_instruction *instr)
{
	return ir3_ABSNEG_S(block, instr, IR3_REG_SNEG);
}

/*
 * alu/sfu instructions:
 */

static struct ir3_instruction *
create_cov(struct ir3_context *ctx, struct ir3_instruction *src,
		unsigned src_bitsize, nir_op op)
{
	type_t src_type, dst_type;

	switch (op) {
	case nir_op_f2f32:
	case nir_op_f2f16_rtne:
	case nir_op_f2f16_rtz:
	case nir_op_f2f16:
	case nir_op_f2i32:
	case nir_op_f2i16:
	case nir_op_f2i8:
	case nir_op_f2u32:
	case nir_op_f2u16:
	case nir_op_f2u8:
		switch (src_bitsize) {
		case 32:
			src_type = TYPE_F32;
			break;
		case 16:
			src_type = TYPE_F16;
			break;
		default:
			ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
		}
		break;

	case nir_op_i2f32:
	case nir_op_i2f16:
	case nir_op_i2i32:
	case nir_op_i2i16:
	case nir_op_i2i8:
		switch (src_bitsize) {
		case 32:
			src_type = TYPE_S32;
			break;
		case 16:
			src_type = TYPE_S16;
			break;
		case 8:
			src_type = TYPE_S8;
			break;
		default:
			ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
		}
		break;

	case nir_op_u2f32:
	case nir_op_u2f16:
	case nir_op_u2u32:
	case nir_op_u2u16:
	case nir_op_u2u8:
		switch (src_bitsize) {
		case 32:
			src_type = TYPE_U32;
			break;
		case 16:
			src_type = TYPE_U16;
			break;
		case 8:
			src_type = TYPE_U8;
			break;
		default:
			ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
		}
		break;

	default:
		ir3_context_error(ctx, "invalid conversion op: %u", op);
	}

	switch (op) {
	case nir_op_f2f32:
	case nir_op_i2f32:
	case nir_op_u2f32:
		dst_type = TYPE_F32;
		break;

	case nir_op_f2f16_rtne:
	case nir_op_f2f16_rtz:
	case nir_op_f2f16:
		/* TODO how to handle rounding mode? */
	case nir_op_i2f16:
	case nir_op_u2f16:
		dst_type = TYPE_F16;
		break;

	case nir_op_f2i32:
	case nir_op_i2i32:
		dst_type = TYPE_S32;
		break;

	case nir_op_f2i16:
	case nir_op_i2i16:
		dst_type = TYPE_S16;
		break;

	case nir_op_f2i8:
	case nir_op_i2i8:
		dst_type = TYPE_S8;
		break;

	case nir_op_f2u32:
	case nir_op_u2u32:
		dst_type = TYPE_U32;
		break;

	case nir_op_f2u16:
	case nir_op_u2u16:
		dst_type = TYPE_U16;
		break;

	case nir_op_f2u8:
	case nir_op_u2u8:
		dst_type = TYPE_U8;
		break;

	default:
		ir3_context_error(ctx, "invalid conversion op: %u", op);
	}

	return ir3_COV(ctx->block, src, src_type, dst_type);
}

static void
emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
{
	const nir_op_info *info = &nir_op_infos[alu->op];
	struct ir3_instruction **dst, *src[info->num_inputs];
	unsigned bs[info->num_inputs];     /* bit size */
	struct ir3_block *b = ctx->block;
	unsigned dst_sz, wrmask;

	if (alu->dest.dest.is_ssa) {
		dst_sz = alu->dest.dest.ssa.num_components;
		wrmask = (1 << dst_sz) - 1;
	} else {
		dst_sz = alu->dest.dest.reg.reg->num_components;
		wrmask = alu->dest.write_mask;
	}

	dst = ir3_get_dst(ctx, &alu->dest.dest, dst_sz);

	/* Vectors are special in that they have non-scalarized writemasks,
	 * and just take the first swizzle channel for each argument in
	 * order into each writemask channel.
	 */
	if ((alu->op == nir_op_vec2) ||
			(alu->op == nir_op_vec3) ||
			(alu->op == nir_op_vec4)) {

		for (int i = 0; i < info->num_inputs; i++) {
			nir_alu_src *asrc = &alu->src[i];

			compile_assert(ctx, !asrc->abs);
			compile_assert(ctx, !asrc->negate);

			src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[0]];
			if (!src[i])
				src[i] = create_immed(ctx->block, 0);
			dst[i] = ir3_MOV(b, src[i], TYPE_U32);
		}

		ir3_put_dst(ctx, &alu->dest.dest);
		return;
	}

	/* We also get mov's with more than one component, so handle
	 * those specially:
	 */
	if ((alu->op == nir_op_imov) || (alu->op == nir_op_fmov)) {
		type_t type = (alu->op == nir_op_imov) ? TYPE_U32 : TYPE_F32;
		nir_alu_src *asrc = &alu->src[0];
		struct ir3_instruction *const *src0 = ir3_get_src(ctx, &asrc->src);

		for (unsigned i = 0; i < dst_sz; i++) {
			if (wrmask & (1 << i)) {
				dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], type);
			} else {
				dst[i] = NULL;
			}
		}

		ir3_put_dst(ctx, &alu->dest.dest);
		return;
	}

	/* General case: We can just grab the one used channel per src. */
	for (int i = 0; i < info->num_inputs; i++) {
		unsigned chan = ffs(alu->dest.write_mask) - 1;
		nir_alu_src *asrc = &alu->src[i];

		compile_assert(ctx, !asrc->abs);
		compile_assert(ctx, !asrc->negate);

		src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[chan]];
		bs[i] = nir_src_bit_size(asrc->src);

		compile_assert(ctx, src[i]);
	}

	switch (alu->op) {
	case nir_op_f2f32:
	case nir_op_f2f16_rtne:
	case nir_op_f2f16_rtz:
	case nir_op_f2f16:
	case nir_op_f2i32:
	case nir_op_f2i16:
	case nir_op_f2i8:
	case nir_op_f2u32:
	case nir_op_f2u16:
	case nir_op_f2u8:
	case nir_op_i2f32:
	case nir_op_i2f16:
	case nir_op_i2i32:
	case nir_op_i2i16:
	case nir_op_i2i8:
	case nir_op_u2f32:
	case nir_op_u2f16:
	case nir_op_u2u32:
	case nir_op_u2u16:
	case nir_op_u2u8:
		dst[0] = create_cov(ctx, src[0], bs[0], alu->op);
		break;
	case nir_op_f2b32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_b2f16:
	case nir_op_b2f32:
		dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
		break;
	case nir_op_b2i8:
	case nir_op_b2i16:
	case nir_op_b2i32:
		dst[0] = ir3_b2n(b, src[0]);
		break;
	case nir_op_i2b32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;

	case nir_op_fneg:
		dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FNEG);
		break;
	case nir_op_fabs:
		dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FABS);
		break;
	case nir_op_fmax:
		dst[0] = ir3_MAX_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fmin:
		dst[0] = ir3_MIN_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fsat:
		/* if there is just a single use of the src, and it supports
		 * (sat) bit, we can just fold the (sat) flag back to the
		 * src instruction and create a mov.  This is easier for cp
		 * to eliminate.
		 *
		 * TODO probably opc_cat==4 is ok too
		 */
		if (alu->src[0].src.is_ssa &&
				(list_length(&alu->src[0].src.ssa->uses) == 1) &&
				((opc_cat(src[0]->opc) == 2) || (opc_cat(src[0]->opc) == 3))) {
			src[0]->flags |= IR3_INSTR_SAT;
			dst[0] = ir3_MOV(b, src[0], TYPE_U32);
		} else {
			/* otherwise generate a max.f that saturates.. blob does
			 * similar (generating a cat2 mov using max.f)
			 */
			dst[0] = ir3_MAX_F(b, src[0], 0, src[0], 0);
			dst[0]->flags |= IR3_INSTR_SAT;
		}
		break;
	case nir_op_fmul:
		dst[0] = ir3_MUL_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fadd:
		dst[0] = ir3_ADD_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fsub:
		dst[0] = ir3_ADD_F(b, src[0], 0, src[1], IR3_REG_FNEG);
		break;
	case nir_op_ffma:
		dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0);
		break;
	case nir_op_fddx:
		dst[0] = ir3_DSX(b, src[0], 0);
		dst[0]->cat5.type = TYPE_F32;
		break;
	case nir_op_fddy:
		dst[0] = ir3_DSY(b, src[0], 0);
		dst[0]->cat5.type = TYPE_F32;
		break;
	case nir_op_flt32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_LT;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_fge32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_feq32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_EQ;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_fne32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_fceil:
		dst[0] = ir3_CEIL_F(b, src[0], 0);
		break;
	case nir_op_ffloor:
		dst[0] = ir3_FLOOR_F(b, src[0], 0);
		break;
	case nir_op_ftrunc:
		dst[0] = ir3_TRUNC_F(b, src[0], 0);
		break;
	case nir_op_fround_even:
		dst[0] = ir3_RNDNE_F(b, src[0], 0);
		break;
	case nir_op_fsign:
		dst[0] = ir3_SIGN_F(b, src[0], 0);
		break;

	case nir_op_fsin:
		dst[0] = ir3_SIN(b, src[0], 0);
		break;
	case nir_op_fcos:
		dst[0] = ir3_COS(b, src[0], 0);
		break;
	case nir_op_frsq:
		dst[0] = ir3_RSQ(b, src[0], 0);
		break;
	case nir_op_frcp:
		dst[0] = ir3_RCP(b, src[0], 0);
		break;
	case nir_op_flog2:
		dst[0] = ir3_LOG2(b, src[0], 0);
		break;
	case nir_op_fexp2:
		dst[0] = ir3_EXP2(b, src[0], 0);
		break;
	case nir_op_fsqrt:
		dst[0] = ir3_SQRT(b, src[0], 0);
		break;

	case nir_op_iabs:
		dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SABS);
		break;
	case nir_op_iadd:
		dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_iand:
		dst[0] = ir3_AND_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_imax:
		dst[0] = ir3_MAX_S(b, src[0], 0, src[1], 0);
		break;
	case nir_op_umax:
		dst[0] = ir3_MAX_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_imin:
		dst[0] = ir3_MIN_S(b, src[0], 0, src[1], 0);
		break;
	case nir_op_umin:
		dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_imul:
		/*
		 * dst = (al * bl) + (ah * bl << 16) + (al * bh << 16)
		 *   mull.u tmp0, a, b           ; mul low, i.e. al * bl
		 *   madsh.m16 tmp1, a, b, tmp0  ; mul-add shift high mix, i.e. ah * bl << 16
		 *   madsh.m16 dst, b, a, tmp1   ; i.e. al * bh << 16
		 */
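		/* A quick sanity check of the decomposition (just arithmetic,
		 * not extra emitted code): with a = (ah << 16) | al and
		 * b = (bh << 16) | bl, the full product also has an
		 * (ah * bh) << 32 term, but that overflows entirely out of
		 * the low 32 bits, so the three instructions above compute
		 * (a * b) mod 2^32 exactly.
		 */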
		dst[0] = ir3_MADSH_M16(b, src[1], 0, src[0], 0,
				ir3_MADSH_M16(b, src[0], 0, src[1], 0,
					ir3_MULL_U(b, src[0], 0, src[1], 0), 0), 0);
		break;
	case nir_op_ineg:
		dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
		break;
	case nir_op_inot:
		dst[0] = ir3_NOT_B(b, src[0], 0);
		break;
	case nir_op_ior:
		dst[0] = ir3_OR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ishl:
		dst[0] = ir3_SHL_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ishr:
		dst[0] = ir3_ASHR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_isub:
		dst[0] = ir3_SUB_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ixor:
		dst[0] = ir3_XOR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ushr:
		dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ilt32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_LT;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ige32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ieq32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_EQ;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ine32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ult32:
		dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_LT;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_uge32:
		dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;

	case nir_op_b32csel: {
		struct ir3_instruction *cond = ir3_b2n(b, src[0]);
		compile_assert(ctx, bs[1] == bs[2]);
		/* the boolean condition is 32b even if src[1] and src[2] are
		 * half-precision, but sel.b16 wants all three src's to be the
		 * same type.
		 */
		if (bs[1] < 32)
			cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16);
		dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
		break;
	}
	case nir_op_bit_count:
		dst[0] = ir3_CBITS_B(b, src[0], 0);
		break;
	case nir_op_ifind_msb: {
		struct ir3_instruction *cmp;
		dst[0] = ir3_CLZ_S(b, src[0], 0);
		cmp = ir3_CMPS_S(b, dst[0], 0, create_immed(b, 0), 0);
		cmp->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_SEL_B32(b,
				ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
				cmp, 0, dst[0], 0);
		break;
	}
	case nir_op_ufind_msb:
		dst[0] = ir3_CLZ_B(b, src[0], 0);
		dst[0] = ir3_SEL_B32(b,
				ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
				src[0], 0, dst[0], 0);
		break;
	case nir_op_find_lsb:
		dst[0] = ir3_BFREV_B(b, src[0], 0);
		dst[0] = ir3_CLZ_B(b, dst[0], 0);
		break;
	case nir_op_bitfield_reverse:
		dst[0] = ir3_BFREV_B(b, src[0], 0);
		break;

	default:
		ir3_context_error(ctx, "Unhandled ALU op: %s\n",
				nir_op_infos[alu->op].name);
		break;
	}

	ir3_put_dst(ctx, &alu->dest.dest);
}

/* handles direct/indirect UBO reads: */
static void
emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
	nir_const_value *const_offset;
	/* UBO addresses are the first driver params: */
	unsigned ubo = regid(ctx->so->constbase.ubo, 0);
	const unsigned ptrsz = ir3_pointer_size(ctx);

	int off = 0;

	/* First src is ubo index, which could either be an immed or not: */
	src0 = ir3_get_src(ctx, &intr->src[0])[0];
	if (is_same_type_mov(src0) &&
			(src0->regs[1]->flags & IR3_REG_IMMED)) {
		base_lo = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz));
		base_hi = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
	} else {
		base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, 4));
		base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, 4));
	}

	/* note: on 32bit gpu's base_hi is ignored and DCE'd */
	addr = base_lo;

	const_offset = nir_src_as_const_value(intr->src[1]);
	if (const_offset) {
		off += const_offset->u32[0];
	} else {
		/* For load_ubo_indirect, second src is indirect offset: */
		src1 = ir3_get_src(ctx, &intr->src[1])[0];

		/* and add offset to addr: */
		addr = ir3_ADD_S(b, addr, 0, src1, 0);
	}

	/* if offset is too large to encode in the ldg, split it out: */
	if ((off + (intr->num_components * 4)) > 1024) {
		/* split out the minimal amount to improve the odds that
		 * cp can fit the immediate in the add.s instruction:
		 */
		unsigned off2 = off + (intr->num_components * 4) - 1024;
		addr = ir3_ADD_S(b, addr, 0, create_immed(b, off2), 0);
		off -= off2;
	}
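	/* Worked example (just arithmetic, assuming a vec4 load): with
	 * off = 1040 and num_components = 4, off2 = 1040 + 16 - 1024 = 32,
	 * so 32 gets folded into addr and off drops to 1008, which fits
	 * in the ldg encoding.
	 */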

	if (ptrsz == 2) {
		struct ir3_instruction *carry;

		/* handle 32b rollover, ie:
		 *   if (addr < base_lo)
		 *     base_hi++
		 */
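		/* cmps.u produces the native 0/1 bool encoding, so its result
		 * can be added to base_hi directly as the carry bit:
		 */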
		carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
		carry->cat2.condition = IR3_COND_LT;
		base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);

		addr = ir3_create_collect(ctx, (struct ir3_instruction*[]){ addr, base_hi }, 2);
	}

	for (int i = 0; i < intr->num_components; i++) {
		struct ir3_instruction *load =
			ir3_LDG(b, addr, 0, create_immed(b, 1), 0);
		load->cat6.type = TYPE_U32;
		load->cat6.src_offset = off + i * 4;    /* byte offset */
		dst[i] = load;
	}
}

/* src[] = { block_index } */
static void
emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	/* SSBO size stored as a const starting at ssbo_sizes: */
	unsigned blk_idx = nir_src_as_const_value(intr->src[0])->u32[0];
	unsigned idx = regid(ctx->so->constbase.ssbo_sizes, 0) +
		ctx->so->const_layout.ssbo_size.off[blk_idx];

	debug_assert(ctx->so->const_layout.ssbo_size.mask & (1 << blk_idx));

	dst[0] = create_uniform(ctx->block, idx);
}

/* src[] = { offset }. const_index[] = { base } */
static void
emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *ldl, *offset;
	unsigned base;

	offset = ir3_get_src(ctx, &intr->src[0])[0];
	base = nir_intrinsic_base(intr);

	ldl = ir3_LDL(b, offset, 0, create_immed(b, intr->num_components), 0);
	ldl->cat6.src_offset = base;
	ldl->cat6.type = utype_dst(intr->dest);
	ldl->regs[0]->wrmask = MASK(intr->num_components);

	ldl->barrier_class = IR3_BARRIER_SHARED_R;
	ldl->barrier_conflict = IR3_BARRIER_SHARED_W;

	ir3_split_dest(b, dst, ldl, 0, intr->num_components);
}

/* src[] = { value, offset }. const_index[] = { base, write_mask } */
static void
emit_intrinsic_store_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *stl, *offset;
	struct ir3_instruction * const *value;
	unsigned base, wrmask;

	value = ir3_get_src(ctx, &intr->src[0]);
	offset = ir3_get_src(ctx, &intr->src[1])[0];

	base = nir_intrinsic_base(intr);
	wrmask = nir_intrinsic_write_mask(intr);

	/* Combine groups of consecutive enabled channels in one write
	 * message. We use ffs to find the first enabled channel and then ffs on
	 * the bit-inverse, down-shifted writemask to determine the length of
	 * the block of enabled bits.
	 *
	 * (trick stolen from i965's fs_visitor::nir_emit_cs_intrinsic())
	 */
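	/* Worked example (no extra logic, just the loop below): for
	 * wrmask = 0b1011 the first pass emits a length-2 write covering
	 * components 0..1, the mask is then cleared to 0b1000, and a
	 * second length-1 write covers component 3.
	 */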
	while (wrmask) {
		unsigned first_component = ffs(wrmask) - 1;
		unsigned length = ffs(~(wrmask >> first_component)) - 1;

		stl = ir3_STL(b, offset, 0,
				ir3_create_collect(ctx, &value[first_component], length), 0,
				create_immed(b, length), 0);
		stl->cat6.dst_offset = first_component + base;
		stl->cat6.type = utype_src(intr->src[0]);
		stl->barrier_class = IR3_BARRIER_SHARED_W;
		stl->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;

		array_insert(b, b->keeps, stl);

		/* Clear the bits in the writemask that we just wrote, then try
		 * again to see if more channels are left.
		 */
		wrmask &= (15 << (first_component + length));
	}
}

/*
 * CS shared variable atomic intrinsics
 *
 * All of the shared variable atomic memory operations read a value from
 * memory, compute a new value using one of the operations below, write the
 * new value to memory, and return the original value read.
 *
 * All operations take 2 sources except CompSwap that takes 3. These
 * sources represent:
 *
 * 0: The offset into the shared variable storage region that the atomic
 *    operation will operate on.
 * 1: The data parameter to the atomic function (i.e. the value to add
 *    in shared_atomic_add, etc).
 * 2: For CompSwap only: the second data parameter.
 */
static struct ir3_instruction *
emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *atomic, *src0, *src1;
	type_t type = TYPE_U32;

	src0 = ir3_get_src(ctx, &intr->src[0])[0]; /* offset */
	src1 = ir3_get_src(ctx, &intr->src[1])[0]; /* value */

	switch (intr->intrinsic) {
	case nir_intrinsic_shared_atomic_add:
		atomic = ir3_ATOMIC_ADD(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_imin:
		atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_shared_atomic_umin:
		atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_imax:
		atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_shared_atomic_umax:
		atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_and:
		atomic = ir3_ATOMIC_AND(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_or:
		atomic = ir3_ATOMIC_OR(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_xor:
		atomic = ir3_ATOMIC_XOR(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_exchange:
		atomic = ir3_ATOMIC_XCHG(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_comp_swap:
		/* for cmpxchg, src1 is [ui]vec2(data, compare): */
		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
			ir3_get_src(ctx, &intr->src[2])[0],
			src1,
		}, 2);
		atomic = ir3_ATOMIC_CMPXCHG(b, src0, 0, src1, 0);
		break;
	default:
		unreachable("boo");
	}

	atomic->cat6.iim_val = 1;
	atomic->cat6.d = 1;
	atomic->cat6.type = type;
	atomic->barrier_class = IR3_BARRIER_SHARED_W;
	atomic->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;

	/* even if nothing consumes the result, we can't DCE the instruction: */
	array_insert(b, b->keeps, atomic);

	return atomic;
}

/* src[] = { deref, coord, sample_index }. const_index[] = {} */
static void
emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	struct ir3_instruction *sam;
	struct ir3_instruction * const *src0 = ir3_get_src(ctx, &intr->src[1]);
	struct ir3_instruction *coords[4];
	unsigned flags, ncoords = ir3_get_image_coords(var, &flags);
	unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
	unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot);
	type_t type = ir3_get_image_type(var);

	/* hmm, this seems a bit odd, but it is what blob does and (at least
	 * a5xx) just faults on bogus addresses otherwise:
	 */
	if (flags & IR3_INSTR_3D) {
		flags &= ~IR3_INSTR_3D;
		flags |= IR3_INSTR_A;
	}

	for (unsigned i = 0; i < ncoords; i++)
		coords[i] = src0[i];

	if (ncoords == 1)
		coords[ncoords++] = create_immed(b, 0);

	sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags,
			tex_idx, tex_idx, ir3_create_collect(ctx, coords, ncoords), NULL);

	sam->barrier_class = IR3_BARRIER_IMAGE_R;
	sam->barrier_conflict = IR3_BARRIER_IMAGE_W;

	ir3_split_dest(b, dst, sam, 0, 4);
}

static void
emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
	unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot);
	struct ir3_instruction *sam, *lod;
	unsigned flags, ncoords = ir3_get_image_coords(var, &flags);

	lod = create_immed(b, 0);
	sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
			tex_idx, tex_idx, lod, NULL);

	/* Array size actually ends up in .w rather than .z. This doesn't
	 * matter for miplevel 0, but for higher mips the value in z is
	 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
	 * returned, which means that we have to add 1 to it for arrays for
	 * a3xx.
	 *
	 * Note use a temporary dst and then copy, since the size of the dst
	 * array that is passed in is based on nir's understanding of the
	 * result size, not the hardware's
	 */
	struct ir3_instruction *tmp[4];

	ir3_split_dest(b, tmp, sam, 0, 4);

	/* get_size instruction returns size in bytes instead of texels
	 * for imageBuffer, so we need to divide it by the pixel size
	 * of the image format.
	 *
	 * TODO: This is at least true on a5xx. Check other gens.
	 */
	enum glsl_sampler_dim dim =
		glsl_get_sampler_dim(glsl_without_array(var->type));
	if (dim == GLSL_SAMPLER_DIM_BUF) {
		/* Since all the possible values the divisor can take are
		 * power-of-two (4, 8, or 16), the division is implemented
		 * as a shift-right.
		 * During shader setup, the log2 of the image format's
		 * bytes-per-pixel should have been emitted in 2nd slot of
		 * image_dims. See ir3_shader::emit_image_dims().
		 */
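		/* e.g. (assuming an RGBA8 buffer image, 4 bytes per texel):
		 * the const holds log2(4) = 2, so the byte count returned by
		 * getsize is shifted right by 2 to recover the texel count.
		 */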
		unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
			ctx->so->const_layout.image_dims.off[var->data.driver_location];
		struct ir3_instruction *aux = create_uniform(b, cb + 1);

		tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0);
	}

	for (unsigned i = 0; i < ncoords; i++)
		dst[i] = tmp[i];

	if (flags & IR3_INSTR_A) {
		if (ctx->compiler->levels_add_one) {
			dst[ncoords-1] = ir3_ADD_U(b, tmp[3], 0, create_immed(b, 1), 0);
		} else {
			dst[ncoords-1] = ir3_MOV(b, tmp[3], TYPE_U32);
		}
	}
}

static void
emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *barrier;

	switch (intr->intrinsic) {
	case nir_intrinsic_barrier:
		barrier = ir3_BAR(b);
		barrier->cat7.g = true;
		barrier->cat7.l = true;
		barrier->flags = IR3_INSTR_SS | IR3_INSTR_SY;
		barrier->barrier_class = IR3_BARRIER_EVERYTHING;
		break;
	case nir_intrinsic_memory_barrier:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_W;
		barrier->barrier_conflict =
			IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
		break;
	case nir_intrinsic_memory_barrier_atomic_counter:
	case nir_intrinsic_memory_barrier_buffer:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_BUFFER_W;
		barrier->barrier_conflict = IR3_BARRIER_BUFFER_R |
			IR3_BARRIER_BUFFER_W;
		break;
	case nir_intrinsic_memory_barrier_image:
		// TODO double check if this should have .g set
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_IMAGE_W;
		barrier->barrier_conflict = IR3_BARRIER_IMAGE_R |
			IR3_BARRIER_IMAGE_W;
		break;
	case nir_intrinsic_memory_barrier_shared:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.l = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_SHARED_W;
		barrier->barrier_conflict = IR3_BARRIER_SHARED_R |
			IR3_BARRIER_SHARED_W;
		break;
	case nir_intrinsic_group_memory_barrier:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.l = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_SHARED_W |
			IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_W;
		barrier->barrier_conflict =
			IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W |
			IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
		break;
	default:
		unreachable("boo");
	}

	/* make sure barrier doesn't get DCE'd */
	array_insert(b, b->keeps, barrier);
}

static void add_sysval_input_compmask(struct ir3_context *ctx,
		gl_system_value slot, unsigned compmask,
		struct ir3_instruction *instr)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned r = regid(so->inputs_count, 0);
	unsigned n = so->inputs_count++;

	so->inputs[n].sysval = true;
	so->inputs[n].slot = slot;
	so->inputs[n].compmask = compmask;
	so->inputs[n].regid = r;
	so->inputs[n].interpolate = INTERP_MODE_FLAT;
	so->total_in++;

	ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1);
	ctx->ir->inputs[r] = instr;
}

static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot,
		struct ir3_instruction *instr)
{
	add_sysval_input_compmask(ctx, slot, 0x1, instr);
}

static void
emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
	struct ir3_instruction **dst;
	struct ir3_instruction * const *src;
	struct ir3_block *b = ctx->block;
	nir_const_value *const_offset;
	int idx, comp;

	if (info->has_dest) {
		unsigned n = nir_intrinsic_dest_components(intr);
		dst = ir3_get_dst(ctx, &intr->dest, n);
	} else {
		dst = NULL;
	}

	switch (intr->intrinsic) {
	case nir_intrinsic_load_uniform:
		idx = nir_intrinsic_base(intr);
		const_offset = nir_src_as_const_value(intr->src[0]);
		if (const_offset) {
			idx += const_offset->u32[0];
			for (int i = 0; i < intr->num_components; i++) {
				unsigned n = idx * 4 + i;
				dst[i] = create_uniform(b, n);
			}
		} else {
			src = ir3_get_src(ctx, &intr->src[0]);
			for (int i = 0; i < intr->num_components; i++) {
				int n = idx * 4 + i;
				dst[i] = create_uniform_indirect(b, n,
						ir3_get_addr(ctx, src[0], 4));
			}
			/* NOTE: if relative addressing is used, we set
			 * constlen in the compiler (to worst-case value)
			 * since we don't know in the assembler what the max
			 * addr reg value can be:
			 */
			ctx->so->constlen = ctx->s->num_uniforms;
		}
		break;
	case nir_intrinsic_load_ubo:
		emit_intrinsic_load_ubo(ctx, intr, dst);
		break;
	case nir_intrinsic_load_input:
		idx = nir_intrinsic_base(intr);
		comp = nir_intrinsic_component(intr);
		const_offset = nir_src_as_const_value(intr->src[0]);
		if (const_offset) {
			idx += const_offset->u32[0];
			for (int i = 0; i < intr->num_components; i++) {
				unsigned n = idx * 4 + i + comp;
				dst[i] = ctx->ir->inputs[n];
			}
		} else {
			src = ir3_get_src(ctx, &intr->src[0]);
			struct ir3_instruction *collect =
				ir3_create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs);
			struct ir3_instruction *addr = ir3_get_addr(ctx, src[0], 4);
			for (int i = 0; i < intr->num_components; i++) {
				unsigned n = idx * 4 + i + comp;
				dst[i] = create_indirect_load(ctx, ctx->ir->ninputs,
						n, addr, collect);
			}
		}
		break;
	/* All SSBO intrinsics should have been lowered by the 'lower_io_offsets'
	 * pass and replaced by an ir3-specific version that adds the
	 * dword-offset in the last source.
	 */
	case nir_intrinsic_load_ssbo_ir3:
		ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
		break;
	case nir_intrinsic_store_ssbo_ir3:
		ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
		break;
	case nir_intrinsic_get_buffer_size:
		emit_intrinsic_ssbo_size(ctx, intr, dst);
		break;
	case nir_intrinsic_ssbo_atomic_add_ir3:
	case nir_intrinsic_ssbo_atomic_imin_ir3:
	case nir_intrinsic_ssbo_atomic_umin_ir3:
	case nir_intrinsic_ssbo_atomic_imax_ir3:
	case nir_intrinsic_ssbo_atomic_umax_ir3:
	case nir_intrinsic_ssbo_atomic_and_ir3:
	case nir_intrinsic_ssbo_atomic_or_ir3:
	case nir_intrinsic_ssbo_atomic_xor_ir3:
	case nir_intrinsic_ssbo_atomic_exchange_ir3:
	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
		dst[0] = ctx->funcs->emit_intrinsic_atomic_ssbo(ctx, intr);
		break;
	case nir_intrinsic_load_shared:
		emit_intrinsic_load_shared(ctx, intr, dst);
		break;
	case nir_intrinsic_store_shared:
		emit_intrinsic_store_shared(ctx, intr);
		break;
	case nir_intrinsic_shared_atomic_add:
	case nir_intrinsic_shared_atomic_imin:
	case nir_intrinsic_shared_atomic_umin:
	case nir_intrinsic_shared_atomic_imax:
	case nir_intrinsic_shared_atomic_umax:
	case nir_intrinsic_shared_atomic_and:
	case nir_intrinsic_shared_atomic_or:
	case nir_intrinsic_shared_atomic_xor:
	case nir_intrinsic_shared_atomic_exchange:
	case nir_intrinsic_shared_atomic_comp_swap:
		dst[0] = emit_intrinsic_atomic_shared(ctx, intr);
		break;
	case nir_intrinsic_image_deref_load:
		emit_intrinsic_load_image(ctx, intr, dst);
		break;
	case nir_intrinsic_image_deref_store:
		ctx->funcs->emit_intrinsic_store_image(ctx, intr);
		break;
	case nir_intrinsic_image_deref_size:
		emit_intrinsic_image_size(ctx, intr, dst);
		break;
	case nir_intrinsic_image_deref_atomic_add:
	case nir_intrinsic_image_deref_atomic_min:
	case nir_intrinsic_image_deref_atomic_max:
	case nir_intrinsic_image_deref_atomic_and:
	case nir_intrinsic_image_deref_atomic_or:
	case nir_intrinsic_image_deref_atomic_xor:
	case nir_intrinsic_image_deref_atomic_exchange:
	case nir_intrinsic_image_deref_atomic_comp_swap:
		dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
		break;
	case nir_intrinsic_barrier:
	case nir_intrinsic_memory_barrier:
	case nir_intrinsic_group_memory_barrier:
	case nir_intrinsic_memory_barrier_atomic_counter:
	case nir_intrinsic_memory_barrier_buffer:
	case nir_intrinsic_memory_barrier_image:
	case nir_intrinsic_memory_barrier_shared:
		emit_intrinsic_barrier(ctx, intr);
		/* note that blk ptr no longer valid, make that obvious: */
		b = NULL;
		break;
	case nir_intrinsic_store_output:
		idx = nir_intrinsic_base(intr);
		comp = nir_intrinsic_component(intr);
		const_offset = nir_src_as_const_value(intr->src[1]);
		compile_assert(ctx, const_offset != NULL);
		idx += const_offset->u32[0];

		src = ir3_get_src(ctx, &intr->src[0]);
		for (int i = 0; i < intr->num_components; i++) {
			unsigned n = idx * 4 + i + comp;
			ctx->ir->outputs[n] = src[i];
		}
		break;
	case nir_intrinsic_load_base_vertex:
	case nir_intrinsic_load_first_vertex:
		if (!ctx->basevertex) {
			ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE);
			add_sysval_input(ctx, SYSTEM_VALUE_FIRST_VERTEX, ctx->basevertex);
		}
		dst[0] = ctx->basevertex;
		break;
	case nir_intrinsic_load_vertex_id_zero_base:
	case nir_intrinsic_load_vertex_id:
		if (!ctx->vertex_id) {
			gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ?
				SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
			ctx->vertex_id = create_input(ctx, 0);
			add_sysval_input(ctx, sv, ctx->vertex_id);
		}
		dst[0] = ctx->vertex_id;
		break;
	case nir_intrinsic_load_instance_id:
		if (!ctx->instance_id) {
			ctx->instance_id = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID,
					ctx->instance_id);
		}
		dst[0] = ctx->instance_id;
		break;
	case nir_intrinsic_load_sample_id:
	case nir_intrinsic_load_sample_id_no_per_sample:
		if (!ctx->samp_id) {
			ctx->samp_id = create_input(ctx, 0);
			ctx->samp_id->regs[0]->flags |= IR3_REG_HALF;
			add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID,
					ctx->samp_id);
		}
		dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32);
		break;
	case nir_intrinsic_load_sample_mask_in:
		if (!ctx->samp_mask_in) {
			ctx->samp_mask_in = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN,
					ctx->samp_mask_in);
		}
		dst[0] = ctx->samp_mask_in;
		break;
	case nir_intrinsic_load_user_clip_plane:
		idx = nir_intrinsic_ucp_id(intr);
		for (int i = 0; i < intr->num_components; i++) {
			unsigned n = idx * 4 + i;
			dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
		}
		break;
	case nir_intrinsic_load_front_face:
		if (!ctx->frag_face) {
			ctx->so->frag_face = true;
			ctx->frag_face = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, ctx->frag_face);
			ctx->frag_face->regs[0]->flags |= IR3_REG_HALF;
		}
		/* for fragface, we get -1 for back and 0 for front. However this is
		 * the inverse of what nir expects (where ~0 is true).
		 */
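		/* i.e. the cov.s16s32 sign-extends and not.b flips it:
		 * front: not(0) -> ~0 (true), back: not(-1) -> 0 (false).
		 */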
		dst[0] = ir3_COV(b, ctx->frag_face, TYPE_S16, TYPE_S32);
		dst[0] = ir3_NOT_B(b, dst[0], 0);
		break;
	case nir_intrinsic_load_local_invocation_id:
		if (!ctx->local_invocation_id) {
			ctx->local_invocation_id = create_input_compmask(ctx, 0, 0x7);
			add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID,
					0x7, ctx->local_invocation_id);
		}
		ir3_split_dest(b, dst, ctx->local_invocation_id, 0, 3);
		break;
	case nir_intrinsic_load_work_group_id:
		if (!ctx->work_group_id) {
			ctx->work_group_id = create_input_compmask(ctx, 0, 0x7);
			add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID,
					0x7, ctx->work_group_id);
			ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH;
		}
		ir3_split_dest(b, dst, ctx->work_group_id, 0, 3);
		break;
	case nir_intrinsic_load_num_work_groups:
		for (int i = 0; i < intr->num_components; i++) {
			dst[i] = create_driver_param(ctx, IR3_DP_NUM_WORK_GROUPS_X + i);
		}
		break;
	case nir_intrinsic_load_local_group_size:
		for (int i = 0; i < intr->num_components; i++) {
			dst[i] = create_driver_param(ctx, IR3_DP_LOCAL_GROUP_SIZE_X + i);
		}
		break;
	case nir_intrinsic_discard_if:
	case nir_intrinsic_discard: {
		struct ir3_instruction *cond, *kill;

		if (intr->intrinsic == nir_intrinsic_discard_if) {
			/* conditional discard: */
			src = ir3_get_src(ctx, &intr->src[0]);
			cond = ir3_b2n(b, src[0]);
		} else {
			/* unconditional discard: */
			cond = create_immed(b, 1);
		}

		/* NOTE: only cmps.*.* can write p0.x: */
		cond = ir3_CMPS_S(b, cond, 0, create_immed(b, 0), 0);
		cond->cat2.condition = IR3_COND_NE;

		/* condition always goes in predicate register: */
		cond->regs[0]->num = regid(REG_P0, 0);

		kill = ir3_KILL(b, cond, 0);
		array_insert(ctx->ir, ctx->ir->predicates, kill);

		array_insert(b, b->keeps, kill);
		ctx->so->has_kill = true;

		break;
	}
	default:
		ir3_context_error(ctx, "Unhandled intrinsic type: %s\n",
				nir_intrinsic_infos[intr->intrinsic].name);
		break;
	}

	if (info->has_dest)
		ir3_put_dst(ctx, &intr->dest);
}

static void
emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
{
	struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &instr->def,
			instr->def.num_components);
	type_t type = (instr->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;

	for (int i = 0; i < instr->def.num_components; i++)
		dst[i] = create_immed_typed(ctx->block, instr->value.u32[i], type);
}

static void
emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
{
	struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &undef->def,
			undef->def.num_components);
	type_t type = (undef->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;

	/* backend doesn't want undefined instructions, so just plug
	 * in 0.0..
	 */
	for (int i = 0; i < undef->def.num_components; i++)
		dst[i] = create_immed_typed(ctx->block, fui(0.0), type);
}

/*
 * texture fetch/sample instructions:
 */

static void
tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
{
	unsigned coords, flags = 0;

	/* note: would use tex->coord_components.. except txs.. also,
	 * since array index goes after shadow ref, we don't want to
	 * count it:
	 */
	switch (tex->sampler_dim) {
	case GLSL_SAMPLER_DIM_1D:
	case GLSL_SAMPLER_DIM_BUF:
		coords = 1;
		break;
	case GLSL_SAMPLER_DIM_2D:
	case GLSL_SAMPLER_DIM_RECT:
	case GLSL_SAMPLER_DIM_EXTERNAL:
	case GLSL_SAMPLER_DIM_MS:
		coords = 2;
		break;
	case GLSL_SAMPLER_DIM_3D:
	case GLSL_SAMPLER_DIM_CUBE:
		coords = 3;
		flags |= IR3_INSTR_3D;
		break;
	default:
		unreachable("bad sampler_dim");
	}

	if (tex->is_shadow && tex->op != nir_texop_lod)
		flags |= IR3_INSTR_S;

	if (tex->is_array && tex->op != nir_texop_lod)
		flags |= IR3_INSTR_A;

	*flagsp = flags;
	*coordsp = coords;
}

static void
emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction **dst, *sam, *src0[12], *src1[4];
	struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
	struct ir3_instruction *lod, *compare, *proj, *sample_index;
	bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
	unsigned i, coords, flags, ncomp;
	unsigned nsrc0 = 0, nsrc1 = 0;
	type_t type;
	opc_t opc = 0;

	ncomp = nir_dest_num_components(tex->dest);

	coord = off = ddx = ddy = NULL;
	lod = proj = compare = sample_index = NULL;

	dst = ir3_get_dst(ctx, &tex->dest, ncomp);

	for (unsigned i = 0; i < tex->num_srcs; i++) {
		switch (tex->src[i].src_type) {
		case nir_tex_src_coord:
			coord = ir3_get_src(ctx, &tex->src[i].src);
			break;
		case nir_tex_src_bias:
			lod = ir3_get_src(ctx, &tex->src[i].src)[0];
			has_bias = true;
			break;
		case nir_tex_src_lod:
			lod = ir3_get_src(ctx, &tex->src[i].src)[0];
			has_lod = true;
			break;
		case nir_tex_src_comparator: /* shadow comparator */
			compare = ir3_get_src(ctx, &tex->src[i].src)[0];
			break;
		case nir_tex_src_projector:
			proj = ir3_get_src(ctx, &tex->src[i].src)[0];
			has_proj = true;
			break;
		case nir_tex_src_offset:
			off = ir3_get_src(ctx, &tex->src[i].src);
			has_off = true;
			break;
		case nir_tex_src_ddx:
			ddx = ir3_get_src(ctx, &tex->src[i].src);
			break;
		case nir_tex_src_ddy:
			ddy = ir3_get_src(ctx, &tex->src[i].src);
			break;
		case nir_tex_src_ms_index:
			sample_index = ir3_get_src(ctx, &tex->src[i].src)[0];
			break;
		default:
			ir3_context_error(ctx, "Unhandled NIR tex src type: %d\n",
					tex->src[i].src_type);
			return;
		}
	}

	switch (tex->op) {
	case nir_texop_tex:      opc = has_lod ? OPC_SAML : OPC_SAM; break;
	case nir_texop_txb:      opc = OPC_SAMB;     break;
	case nir_texop_txl:      opc = OPC_SAML;     break;
	case nir_texop_txd:      opc = OPC_SAMGQ;    break;
	case nir_texop_txf:      opc = OPC_ISAML;    break;
	case nir_texop_lod:      opc = OPC_GETLOD;   break;
	case nir_texop_tg4:
		/* NOTE: a4xx might need to emulate gather w/ txf (this is
		 * what blob does, seems gather is broken?), and a3xx did
		 * not support it (but probably could also emulate).
		 */
		switch (tex->component) {
		case 0:          opc = OPC_GATHER4R; break;
		case 1:          opc = OPC_GATHER4G; break;
		case 2:          opc = OPC_GATHER4B; break;
		case 3:          opc = OPC_GATHER4A; break;
		}
		break;
	case nir_texop_txf_ms:   opc = OPC_ISAMM;    break;
	case nir_texop_txs:
	case nir_texop_query_levels:
	case nir_texop_texture_samples:
	case nir_texop_samples_identical:
	case nir_texop_txf_ms_mcs:
		ir3_context_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
		return;
	}

	tex_info(tex, &flags, &coords);

	/*
	 * lay out the first argument in the proper order:
	 *  - actual coordinates first
	 *  - shadow reference
	 *  - array index
	 *  - projection w
	 *  - starting at offset 4, dpdx.xy, dpdy.xy
	 *
	 * bias/lod go into the second arg
	 */
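	/* e.g. for a sample from a 2d-array shadow texture, src0 ends up
	 * as (x, y, compare, array_index), matching the order in which the
	 * code below pushes components.
	 */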

	/* insert tex coords: */
	for (i = 0; i < coords; i++)
		src0[i] = coord[i];

	nsrc0 = i;

	/* scale up integer coords for TXF based on the LOD */
	if (ctx->compiler->unminify_coords && (opc == OPC_ISAML)) {
		assert(has_lod);
		for (i = 0; i < coords; i++)
			src0[i] = ir3_SHL_B(b, src0[i], 0, lod, 0);
	}

	if (coords == 1) {
		/* hw doesn't do 1d, so we treat it as 2d with
		 * height of 1, and patch up the y coord.
		 */
		if (is_isam(opc)) {
			src0[nsrc0++] = create_immed(b, 0);
		} else {
			src0[nsrc0++] = create_immed(b, fui(0.5));
		}
	}

	if (tex->is_shadow && tex->op != nir_texop_lod)
		src0[nsrc0++] = compare;

	if (tex->is_array && tex->op != nir_texop_lod) {
		struct ir3_instruction *idx = coord[coords];

		/* the array coord for cube arrays needs 0.5 added to it */
		if (ctx->compiler->array_index_add_half && !is_isam(opc))
			idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);

		src0[nsrc0++] = idx;
	}

	if (has_proj) {
		src0[nsrc0++] = proj;
		flags |= IR3_INSTR_P;
	}

	/* pad to 4, then ddx/ddy: */
	if (tex->op == nir_texop_txd) {
		while (nsrc0 < 4)
			src0[nsrc0++] = create_immed(b, fui(0.0));
		for (i = 0; i < coords; i++)
			src0[nsrc0++] = ddx[i];
		if (coords < 2)
			src0[nsrc0++] = create_immed(b, fui(0.0));
		for (i = 0; i < coords; i++)
			src0[nsrc0++] = ddy[i];
		if (coords < 2)
			src0[nsrc0++] = create_immed(b, fui(0.0));
	}

	/* NOTE a3xx (and possibly a4xx?) might be different, using isaml
	 * with scaled x coord according to requested sample:
	 */
	if (tex->op == nir_texop_txf_ms) {
		if (ctx->compiler->txf_ms_with_isaml) {
			/* the samples are laid out in x dimension as
			 *     0 1 2 3
			 * x_ms = (x << ms) + sample_index;
			 */
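			/* ms holds log2 of the per-texture sample count (packed
			 * two bits per texture in ctx->samples), e.g. with 4x
			 * MSAA, ms = 2 and sample s of texel x fetches from
			 * x * 4 + s:
			 */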
			struct ir3_instruction *ms;
			ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);

			src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
			src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);

			opc = OPC_ISAML;
		} else {
			src0[nsrc0++] = sample_index;
		}
	}

	/*
	 * second argument (if applicable):
	 *  - offsets
	 *  - lod
	 *  - bias
	 */
	if (has_off | has_lod | has_bias) {
		if (has_off) {
			unsigned off_coords = coords;
			if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
				off_coords--;
			for (i = 0; i < off_coords; i++)
				src1[nsrc1++] = off[i];
			if (off_coords < 2)
				src1[nsrc1++] = create_immed(b, fui(0.0));
			flags |= IR3_INSTR_O;
		}

		if (has_lod | has_bias)
			src1[nsrc1++] = lod;
	}

	switch (tex->dest_type) {
	case nir_type_invalid:
	case nir_type_float:
		type = TYPE_F32;
		break;
	case nir_type_int:
		type = TYPE_S32;
		break;
	case nir_type_uint:
	case nir_type_bool:
		type = TYPE_U32;
		break;
	default:
		unreachable("bad dest_type");
	}

	if (opc == OPC_GETLOD)
		type = TYPE_U32;

	unsigned tex_idx = tex->texture_index;

	ctx->max_texture_index = MAX2(ctx->max_texture_index, tex_idx);

	struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0);
	struct ir3_instruction *col1 = ir3_create_collect(ctx, src1, nsrc1);

	sam = ir3_SAM(b, opc, type, MASK(ncomp), flags,
			tex_idx, tex_idx, col0, col1);

	if ((ctx->astc_srgb & (1 << tex_idx)) && !nir_tex_instr_is_query(tex)) {
		/* only need first 3 components: */
		sam->regs[0]->wrmask = 0x7;
		ir3_split_dest(b, dst, sam, 0, 3);

		/* we need to sample the alpha separately with a non-ASTC
		 * texture state:
		 */
		sam = ir3_SAM(b, opc, type, 0b1000, flags,
				tex_idx, tex_idx, col0, col1);

		array_insert(ctx->ir, ctx->ir->astc_srgb, sam);

		/* fixup .w component: */
		ir3_split_dest(b, &dst[3], sam, 3, 1);
	} else {
		/* normal (non-workaround) case: */
		ir3_split_dest(b, dst, sam, 0, ncomp);
	}

	/* GETLOD returns results in 4.8 fixed point */
	if (opc == OPC_GETLOD) {
		struct ir3_instruction *factor = create_immed(b, fui(1.0 / 256));

		compile_assert(ctx, tex->dest_type == nir_type_float);
		for (i = 0; i < 2; i++) {
			dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_U32, TYPE_F32), 0,
					factor, 0);
		}
	}

	ir3_put_dst(ctx, &tex->dest);
}

static void
emit_tex_query_levels(struct ir3_context *ctx, nir_tex_instr *tex)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction **dst, *sam;

	dst = ir3_get_dst(ctx, &tex->dest, 1);

	sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 0b0100, 0,
			tex->texture_index, tex->texture_index, NULL, NULL);

	/* even though there is only one component, since it ends
	 * up in .z rather than .x, we need a split_dest()
	 */
	ir3_split_dest(b, dst, sam, 0, 3);

	/* The # of levels comes from getinfo.z. We need to add 1 to it, since
	 * the value in TEX_CONST_0 is zero-based.
	 */
	if (ctx->compiler->levels_add_one)
		dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);

	ir3_put_dst(ctx, &tex->dest);
}

static void
emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction **dst, *sam;
	struct ir3_instruction *lod;
	unsigned flags, coords;

	tex_info(tex, &flags, &coords);

	/* Actually we want the number of dimensions, not coordinates. This
	 * distinction only matters for cubes.
	 */
	if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
		coords = 2;

	dst = ir3_get_dst(ctx, &tex->dest, 4);

	compile_assert(ctx, tex->num_srcs == 1);
	compile_assert(ctx, tex->src[0].src_type == nir_tex_src_lod);

	lod = ir3_get_src(ctx, &tex->src[0].src)[0];

	sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
			tex->texture_index, tex->texture_index, lod, NULL);

	ir3_split_dest(b, dst, sam, 0, 4);

	/* Array size actually ends up in .w rather than .z. This doesn't
	 * matter for miplevel 0, but for higher mips the value in z is
	 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
	 * returned, which means that we have to add 1 to it for arrays.
	 */
	if (tex->is_array) {
		if (ctx->compiler->levels_add_one) {
			dst[coords] = ir3_ADD_U(b, dst[3], 0, create_immed(b, 1), 0);
		} else {
			dst[coords] = ir3_MOV(b, dst[3], TYPE_U32);
		}
	}

	ir3_put_dst(ctx, &tex->dest);
}

static void
emit_jump(struct ir3_context *ctx, nir_jump_instr *jump)
{
	switch (jump->type) {
	case nir_jump_break:
	case nir_jump_continue:
	case nir_jump_return:
		/* I *think* we can simply just ignore this, and use the
		 * successor block link to figure out where we need to
		 * jump to for break/continue
		 */
		break;
	default:
		ir3_context_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
		break;
	}
}

static void
emit_instr(struct ir3_context *ctx, nir_instr *instr)
{
	switch (instr->type) {
	case nir_instr_type_alu:
		emit_alu(ctx, nir_instr_as_alu(instr));
		break;
	case nir_instr_type_deref:
		/* ignored, handled as part of the intrinsic they are src to */
		break;
	case nir_instr_type_intrinsic:
		emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
		break;
	case nir_instr_type_load_const:
		emit_load_const(ctx, nir_instr_as_load_const(instr));
		break;
	case nir_instr_type_ssa_undef:
		emit_undef(ctx, nir_instr_as_ssa_undef(instr));
		break;
	case nir_instr_type_tex: {
		nir_tex_instr *tex = nir_instr_as_tex(instr);
		/* a couple of tex instructions get special-cased:
		 */
		switch (tex->op) {
		case nir_texop_txs:
			emit_tex_txs(ctx, tex);
			break;
		case nir_texop_query_levels:
			emit_tex_query_levels(ctx, tex);
			break;
		default:
			emit_tex(ctx, tex);
			break;
		}
		break;
	}
	case nir_instr_type_jump:
		emit_jump(ctx, nir_instr_as_jump(instr));
		break;
	case nir_instr_type_phi:
		/* we have converted phi webs to regs in NIR by now */
		ir3_context_error(ctx, "Unexpected NIR instruction type: %d\n", instr->type);
		break;
	case nir_instr_type_call:
	case nir_instr_type_parallel_copy:
		ir3_context_error(ctx, "Unhandled NIR instruction type: %d\n", instr->type);
		break;
	}
}

static struct ir3_block *
get_block(struct ir3_context *ctx, const nir_block *nblock)
{
	struct ir3_block *block;
	struct hash_entry *hentry;
	unsigned i;

	hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
	if (hentry)
		return hentry->data;

	block = ir3_block_create(ctx->ir);
	block->nblock = nblock;
	_mesa_hash_table_insert(ctx->block_ht, nblock, block);

	block->predecessors_count = nblock->predecessors->entries;
	block->predecessors = ralloc_array_size(block,
			sizeof(block->predecessors[0]), block->predecessors_count);
	i = 0;
	set_foreach(nblock->predecessors, sentry) {
		block->predecessors[i++] = get_block(ctx, sentry->key);
	}

	return block;
}

static void
emit_block(struct ir3_context *ctx, nir_block *nblock)
{
	struct ir3_block *block = get_block(ctx, nblock);

	for (int i = 0; i < ARRAY_SIZE(block->successors); i++) {
		if (nblock->successors[i]) {
			block->successors[i] =
				get_block(ctx, nblock->successors[i]);
		}
	}

	ctx->block = block;
	list_addtail(&block->node, &ctx->ir->block_list);

	/* re-emit addr register in each block if needed: */
	for (int i = 0; i < ARRAY_SIZE(ctx->addr_ht); i++) {
		_mesa_hash_table_destroy(ctx->addr_ht[i], NULL);
		ctx->addr_ht[i] = NULL;
	}

	nir_foreach_instr(instr, nblock) {
		ctx->cur_instr = instr;
		emit_instr(ctx, instr);
		ctx->cur_instr = NULL;
		if (ctx->error)
			return;
	}
}

static void emit_cf_list(struct ir3_context *ctx, struct exec_list *list);

static void
emit_if(struct ir3_context *ctx, nir_if *nif)
{
	struct ir3_instruction *condition = ir3_get_src(ctx, &nif->condition)[0];

	ctx->block->condition =
		ir3_get_predicate(ctx, ir3_b2n(condition->block, condition));

	emit_cf_list(ctx, &nif->then_list);
	emit_cf_list(ctx, &nif->else_list);
}

static void
emit_loop(struct ir3_context *ctx, nir_loop *nloop)
{
	emit_cf_list(ctx, &nloop->body);
}

static void
stack_push(struct ir3_context *ctx)
{
	ctx->stack++;
	ctx->max_stack = MAX2(ctx->max_stack, ctx->stack);
}

static void
stack_pop(struct ir3_context *ctx)
{
	compile_assert(ctx, ctx->stack > 0);
	ctx->stack--;
}
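
/* Illustrative note (not from the original source): these helpers only
 * track control-flow nesting depth, which later becomes so->branchstack:
 *
 *    loop {          // stack == 1
 *        if (...) {  // stack == 2  ->  max_stack == 2
 *        }
 *    }
 */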

static void
emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
{
	foreach_list_typed(nir_cf_node, node, node, list) {
		switch (node->type) {
		case nir_cf_node_block:
			emit_block(ctx, nir_cf_node_as_block(node));
			break;
		case nir_cf_node_if:
			stack_push(ctx);
			emit_if(ctx, nir_cf_node_as_if(node));
			stack_pop(ctx);
			break;
		case nir_cf_node_loop:
			stack_push(ctx);
			emit_loop(ctx, nir_cf_node_as_loop(node));
			stack_pop(ctx);
			break;
		case nir_cf_node_function:
			ir3_context_error(ctx, "TODO\n");
			break;
		}
	}
}

/* emit stream-out code. At this point, the current block is the original
 * (nir) end block, and nir ensures that all flow control paths terminate
 * into the end block. We re-purpose the original end block to generate
 * the 'if (vtxcnt < maxvtxcnt)' condition, then append the conditional
 * block holding stream-out write instructions, followed by the new end
 * block:
 *
 *   blockOrigEnd {
 *      p0.x = (vtxcnt < maxvtxcnt)
 *      // succs: blockStreamOut, blockNewEnd
 *   }
 *   blockStreamOut {
 *      ... stream-out instructions ...
 *      // succs: blockNewEnd
 *   }
 *   blockNewEnd {
 *   }
 */
static void
emit_stream_out(struct ir3_context *ctx)
{
	struct ir3_shader_variant *v = ctx->so;
	struct ir3 *ir = ctx->ir;
	struct ir3_stream_output_info *strmout =
			&ctx->so->shader->stream_output;
	struct ir3_block *orig_end_block, *stream_out_block, *new_end_block;
	struct ir3_instruction *vtxcnt, *maxvtxcnt, *cond;
	struct ir3_instruction *bases[IR3_MAX_SO_BUFFERS];

	/* create vtxcnt input in input block at top of shader,
	 * so that it is seen as live over the entire duration
	 * of the shader:
	 */
	vtxcnt = create_input(ctx, 0);
	add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt);

	maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX);

	/* at this point, we are at the original 'end' block,
	 * re-purpose this block to stream-out condition, then
	 * append stream-out block and new-end block
	 */
	orig_end_block = ctx->block;

	// TODO these blocks need to update predecessors..
	// maybe w/ store_global intrinsic, we could do this
	// stuff in nir->nir pass

	stream_out_block = ir3_block_create(ir);
	list_addtail(&stream_out_block->node, &ir->block_list);

	new_end_block = ir3_block_create(ir);
	list_addtail(&new_end_block->node, &ir->block_list);

	orig_end_block->successors[0] = stream_out_block;
	orig_end_block->successors[1] = new_end_block;
	stream_out_block->successors[0] = new_end_block;

	/* setup 'if (vtxcnt < maxvtxcnt)' condition: */
	cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0);
	cond->regs[0]->num = regid(REG_P0, 0);
	cond->cat2.condition = IR3_COND_LT;

	/* condition goes on previous block to the conditional,
	 * since it is used to pick which of the two successor
	 * paths to take:
	 */
	orig_end_block->condition = cond;

	/* switch to stream_out_block to generate the stream-out
	 * instructions:
	 */
	ctx->block = stream_out_block;

	/* Calculate base addresses based on vtxcnt. Instructions
	 * generated for bases not used in following loop will be
	 * stripped out in the backend.
	 */
	for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
		unsigned stride = strmout->stride[i];
		struct ir3_instruction *base, *off;

		base = create_uniform(ctx->block, regid(v->constbase.tfbo, i));

		/* 24-bit should be enough: */
		off = ir3_MUL_U(ctx->block, vtxcnt, 0,
				create_immed(ctx->block, stride * 4), 0);

		bases[i] = ir3_ADD_S(ctx->block, off, 0, base, 0);
	}

	/* Generate the per-output store instructions: */
	for (unsigned i = 0; i < strmout->num_outputs; i++) {
		for (unsigned j = 0; j < strmout->output[i].num_components; j++) {
			unsigned c = j + strmout->output[i].start_component;
			struct ir3_instruction *base, *out, *stg;

			base = bases[strmout->output[i].output_buffer];
			out = ctx->ir->outputs[regid(strmout->output[i].register_index, c)];

			stg = ir3_STG(ctx->block, base, 0, out, 0,
					create_immed(ctx->block, 1), 0);
			stg->cat6.type = TYPE_U32;
			stg->cat6.dst_offset = (strmout->output[i].dst_offset + j) * 4;

			array_insert(ctx->block, ctx->block->keeps, stg);
		}
	}

	/* and finally switch to the new_end_block: */
	ctx->block = new_end_block;
}
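
/* Rough equivalent of what emit_stream_out() generates, written as pseudo-C
 * for clarity (an illustrative sketch only; 'tfbo_const' and 'out' are
 * stand-ins, not real identifiers from this compiler):
 *
 *    if (vtxcnt < maxvtxcnt) {                          // blockOrigEnd
 *        base[i] = tfbo_const[i] + vtxcnt * stride[i] * 4;
 *        for each output component:                     // blockStreamOut
 *            stg.u32 base[buf] + (dst_offset + j) * 4, out[reg + c]
 *    }
 *    // execution then falls through to blockNewEnd
 */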

static void
emit_function(struct ir3_context *ctx, nir_function_impl *impl)
{
	nir_metadata_require(impl, nir_metadata_block_index);

	compile_assert(ctx, ctx->stack == 0);

	emit_cf_list(ctx, &impl->body);
	emit_block(ctx, impl->end_block);

	compile_assert(ctx, ctx->stack == 0);

	/* at this point, we should have a single empty block,
	 * into which we emit the 'end' instruction.
	 */
	compile_assert(ctx, list_empty(&ctx->block->instr_list));

	/* If stream-out (aka transform-feedback) enabled, emit the
	 * stream-out instructions, followed by a new empty block (into
	 * which the 'end' instruction lands).
	 *
	 * NOTE: it is done in this order, rather than inserting before
	 * we emit end_block, because NIR guarantees that all blocks
	 * flow into end_block, and that end_block has no successors.
	 * So by re-purposing end_block as the first block of stream-
	 * out, we guarantee that all exit paths flow into the stream-
	 * out instructions.
	 */
	if ((ctx->compiler->gpu_id < 500) &&
			(ctx->so->shader->stream_output.num_outputs > 0) &&
			!ctx->so->binning_pass) {
		debug_assert(ctx->so->type == MESA_SHADER_VERTEX);
		emit_stream_out(ctx);
	}

	ir3_END(ctx->block);
}

static struct ir3_instruction *
create_frag_coord(struct ir3_context *ctx, unsigned comp)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *instr;

	if (!ctx->frag_coord) {
		ctx->frag_coord = create_input_compmask(ctx, 0, 0xf);
		/* defer add_sysval_input() until after all inputs created */
	}

	ir3_split_dest(block, &instr, ctx->frag_coord, comp, 1);

	switch (comp) {
	case 0: /* .x */
	case 1: /* .y */
		/* for frag_coord, we get unsigned values.. we need
		 * to subtract (integer) 8 and divide by 16 (right-
		 * shift by 4) then convert to float:
		 *
		 *    sub.s tmp, src, 8
		 *    shr.b tmp, tmp, 4
		 *    mov.u32f32 dst, tmp
		 *
		 */
		instr = ir3_SUB_S(block, instr, 0,
				create_immed(block, 8), 0);
		instr = ir3_SHR_B(block, instr, 0,
				create_immed(block, 4), 0);
		instr = ir3_COV(block, instr, TYPE_U32, TYPE_F32);

		return instr;
	case 2: /* .z */
	case 3: /* .w */
	default:
		/* seems that we can use these as-is: */
		return instr;
	}
}
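
/* Worked example for the .x/.y fixup above (illustrative; assumes the raw
 * value is in 1/16th-pixel units with a half-pixel bias of 8): a fragment
 * at window x == 5 arrives as 5*16 + 8 == 88, and (88 - 8) >> 4 == 5,
 * which is then converted to float.
 */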

static void
setup_input(struct ir3_context *ctx, nir_variable *in)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned ncomp = glsl_get_components(in->type);
	unsigned n = in->data.driver_location;
	unsigned frac = in->data.location_frac;
	unsigned slot = in->data.location;

	/* Skip unread inputs: we could end up with, for example, unsplit
	 * matrix/etc inputs when they are not read, so just silently
	 * skip these.
	 */
	if (ncomp > 4)
		return;

	so->inputs[n].slot = slot;
	so->inputs[n].compmask = (1 << (ncomp + frac)) - 1;
	so->inputs_count = MAX2(so->inputs_count, n + 1);
	so->inputs[n].interpolate = in->data.interpolation;

	if (ctx->so->type == MESA_SHADER_FRAGMENT) {
		for (int i = 0; i < ncomp; i++) {
			struct ir3_instruction *instr = NULL;
			unsigned idx = (n * 4) + i + frac;

			if (slot == VARYING_SLOT_POS) {
				so->inputs[n].bary = false;
				so->frag_coord = true;
				instr = create_frag_coord(ctx, i);
			} else if (slot == VARYING_SLOT_PNTC) {
				/* see for example st_nir_fixup_varying_slots().. this is
				 * maybe a bit mesa/st specific. But we need things to line
				 * up for this in fdN_program:
				 *    unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0);
				 *    if (emit->sprite_coord_enable & texmask) {
				 *       ...
				 *    }
				 */
				so->inputs[n].slot = VARYING_SLOT_VAR8;
				so->inputs[n].bary = true;
				instr = create_frag_input(ctx, false);
			} else {
				bool use_ldlv = false;

				/* detect the special case for front/back colors where
				 * we need to do flat vs smooth shading depending on
				 * rast state:
				 */
				if (in->data.interpolation == INTERP_MODE_NONE) {
					switch (slot) {
					case VARYING_SLOT_COL0:
					case VARYING_SLOT_COL1:
					case VARYING_SLOT_BFC0:
					case VARYING_SLOT_BFC1:
						so->inputs[n].rasterflat = true;
						break;
					default:
						break;
					}
				}

				if (ctx->compiler->flat_bypass) {
					if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) ||
							(so->inputs[n].rasterflat && ctx->so->key.rasterflat))
						use_ldlv = true;
				}

				so->inputs[n].bary = true;

				instr = create_frag_input(ctx, use_ldlv);
			}

			compile_assert(ctx, idx < ctx->ir->ninputs);

			ctx->ir->inputs[idx] = instr;
		}
	} else if (ctx->so->type == MESA_SHADER_VERTEX) {
		for (int i = 0; i < ncomp; i++) {
			unsigned idx = (n * 4) + i + frac;
			compile_assert(ctx, idx < ctx->ir->ninputs);
			ctx->ir->inputs[idx] = create_input(ctx, idx);
		}
	} else {
		ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
	}

	if (so->inputs[n].bary || (ctx->so->type == MESA_SHADER_VERTEX)) {
		so->total_in += ncomp;
	}
}
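
/* Example of the compmask math above (illustrative): a vec3 input with
 * location_frac == 1 lives in .yzw, so compmask == (1 << (3 + 1)) - 1 ==
 * 0xf; the mask always counts from component 0, so the unused .x bit is
 * included.
 */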

static void
setup_output(struct ir3_context *ctx, nir_variable *out)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned ncomp = glsl_get_components(out->type);
	unsigned n = out->data.driver_location;
	unsigned frac = out->data.location_frac;
	unsigned slot = out->data.location;
	unsigned comp = 0;

	if (ctx->so->type == MESA_SHADER_FRAGMENT) {
		switch (slot) {
		case FRAG_RESULT_DEPTH:
			comp = 2;  /* tgsi will write to .z component */
			so->writes_pos = true;
			break;
		case FRAG_RESULT_COLOR:
			so->color0_mrt = 1;
			break;
		default:
			if (slot >= FRAG_RESULT_DATA0)
				break;
			ir3_context_error(ctx, "unknown FS output name: %s\n",
					gl_frag_result_name(slot));
		}
	} else if (ctx->so->type == MESA_SHADER_VERTEX) {
		switch (slot) {
		case VARYING_SLOT_POS:
			so->writes_pos = true;
			break;
		case VARYING_SLOT_PSIZ:
			so->writes_psize = true;
			break;
		case VARYING_SLOT_COL0:
		case VARYING_SLOT_COL1:
		case VARYING_SLOT_BFC0:
		case VARYING_SLOT_BFC1:
		case VARYING_SLOT_FOGC:
		case VARYING_SLOT_CLIP_DIST0:
		case VARYING_SLOT_CLIP_DIST1:
		case VARYING_SLOT_CLIP_VERTEX:
			break;
		default:
			if (slot >= VARYING_SLOT_VAR0)
				break;
			if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
				break;
			ir3_context_error(ctx, "unknown VS output name: %s\n",
					gl_varying_slot_name(slot));
		}
	} else {
		ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
	}

	compile_assert(ctx, n < ARRAY_SIZE(so->outputs));

	so->outputs[n].slot = slot;
	so->outputs[n].regid = regid(n, comp);
	so->outputs_count = MAX2(so->outputs_count, n + 1);

	for (int i = 0; i < ncomp; i++) {
		unsigned idx = (n * 4) + i + frac;
		compile_assert(ctx, idx < ctx->ir->noutputs);
		ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
	}

	/* if varying packing doesn't happen, we could end up in a situation
	 * with "holes" in the output, and since the per-generation code that
	 * sets up varying linkage registers doesn't expect to have more than
	 * one varying per vec4 slot, pad the holes.
	 *
	 * Note that this should probably generate a performance warning of
	 * some sort.
	 */
	for (int i = 0; i < frac; i++) {
		unsigned idx = (n * 4) + i;
		if (!ctx->ir->outputs[idx]) {
			ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
		}
	}
}
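
/* Example of the hole padding above (illustrative): an output with
 * location_frac == 2 only occupies .zw, so outputs[n*4 + 0] and
 * outputs[n*4 + 1] get a dummy immed 0.0 to keep one varying per vec4
 * slot for the linkage code.
 */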

static int
max_drvloc(struct exec_list *vars)
{
	int drvloc = -1;
	nir_foreach_variable(var, vars) {
		drvloc = MAX2(drvloc, (int)var->data.driver_location);
	}
	return drvloc;
}

static const unsigned max_sysvals[] = {
	[MESA_SHADER_FRAGMENT] = 24,  // TODO
	[MESA_SHADER_VERTEX] = 16,
	[MESA_SHADER_COMPUTE] = 16,   // TODO how many do we actually need?
	[MESA_SHADER_KERNEL] = 16,    // TODO how many do we actually need?
};

static void
emit_instructions(struct ir3_context *ctx)
{
	unsigned ninputs, noutputs;
	nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);

	ninputs = (max_drvloc(&ctx->s->inputs) + 1) * 4;
	noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4;

	/* we need to leave room for sysvals:
	 */
	ninputs += max_sysvals[ctx->so->type];

	ctx->ir = ir3_create(ctx->compiler, ninputs, noutputs);

	/* Create inputs in first block: */
	ctx->block = get_block(ctx, nir_start_block(fxn));
	ctx->in_block = ctx->block;
	list_addtail(&ctx->block->node, &ctx->ir->block_list);

	ninputs -= max_sysvals[ctx->so->type];

	/* for fragment shader, the vcoord input register is used as the
	 * base for bary.f varying fetch instrs:
	 */
	struct ir3_instruction *vcoord = NULL;
	if (ctx->so->type == MESA_SHADER_FRAGMENT) {
		struct ir3_instruction *xy[2];

		vcoord = create_input_compmask(ctx, 0, 0x3);
		ir3_split_dest(ctx->block, xy, vcoord, 0, 2);

		ctx->frag_vcoord = ir3_create_collect(ctx, xy, 2);
	}

	/* Setup inputs: */
	nir_foreach_variable(var, &ctx->s->inputs) {
		setup_input(ctx, var);
	}

	/* Defer add_sysval_input() stuff until after setup_input(),
	 * because sysvals need to be appended after varyings:
	 */
	if (vcoord) {
		add_sysval_input_compmask(ctx, SYSTEM_VALUE_VARYING_COORD,
				0x3, vcoord);
	}

	if (ctx->frag_coord) {
		add_sysval_input_compmask(ctx, SYSTEM_VALUE_FRAG_COORD,
				0xf, ctx->frag_coord);
	}

	/* Setup outputs: */
	nir_foreach_variable(var, &ctx->s->outputs) {
		setup_output(ctx, var);
	}

	/* Find # of samplers: */
	nir_foreach_variable(var, &ctx->s->uniforms) {
		ctx->so->num_samp += glsl_type_get_sampler_count(var->type);
		/* just assume that we'll be reading from images.. if it
		 * is write-only we don't have to count it, but not sure
		 * if there is a good way to know?
		 */
		ctx->so->num_samp += glsl_type_get_image_count(var->type);
	}
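
	/* Counting example (illustrative assumption): a uniform
	 * 'sampler2D tex[4]' would add 4 to num_samp, and an image uniform
	 * adds 1 even if it is only ever written, per the conservative
	 * assumption in the comment above.
	 */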

	/* Setup registers (which should only be arrays): */
	nir_foreach_register(reg, &ctx->s->registers) {
		ir3_declare_array(ctx, reg);
	}

	/* NOTE: need to do something more clever when we support >1 fxn */
	nir_foreach_register(reg, &fxn->registers) {
		ir3_declare_array(ctx, reg);
	}
	/* And emit the body: */
	ctx->impl = fxn;
	emit_function(ctx, fxn);
}

/* from NIR perspective, we actually have varying inputs. But the varying
 * inputs, from an IR standpoint, are just bary.f/ldlv instructions. The
 * only actual inputs are the sysvals.
 */
static void
fixup_frag_inputs(struct ir3_context *ctx)
{
	struct ir3_shader_variant *so = ctx->so;
	struct ir3 *ir = ctx->ir;
	unsigned i = 0;

	/* sysvals should appear at the end of the inputs, drop everything else: */
	while ((i < so->inputs_count) && !so->inputs[i].sysval)
		i++;

	/* at IR level, inputs are always blocks of 4 scalars: */
	i *= 4;

	ir->inputs = &ir->inputs[i];
	ir->ninputs -= i;
}
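
/* Worked example (illustrative): with two bary.f varyings followed by one
 * sysval input, the loop stops at i == 2, so ir->inputs skips 2*4 == 8
 * scalar slots and ir->ninputs shrinks by 8, leaving only the sysvals as
 * actual inputs.
 */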

/* Fixup tex sampler state for astc/srgb workaround instructions. We
 * need to assign the tex state indexes for these after we know the
 * max tex index.
 */
static void
fixup_astc_srgb(struct ir3_context *ctx)
{
	struct ir3_shader_variant *so = ctx->so;
	/* indexed by original tex idx, value is newly assigned alpha sampler
	 * state tex idx. Zero is invalid since there is at least one sampler
	 * if we get here.
	 */
	unsigned alt_tex_state[16] = {0};
	unsigned tex_idx = ctx->max_texture_index + 1;
	unsigned idx = 0;

	so->astc_srgb.base = tex_idx;

	for (unsigned i = 0; i < ctx->ir->astc_srgb_count; i++) {
		struct ir3_instruction *sam = ctx->ir->astc_srgb[i];

		compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));

		if (alt_tex_state[sam->cat5.tex] == 0) {
			/* assign new alternate/alpha tex state slot: */
			alt_tex_state[sam->cat5.tex] = tex_idx++;
			so->astc_srgb.orig_idx[idx++] = sam->cat5.tex;
			so->astc_srgb.count++;
		}

		sam->cat5.tex = alt_tex_state[sam->cat5.tex];
	}
}
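
/* Worked example (illustrative): with max_texture_index == 7 and workaround
 * sam instructions on tex idx 3, 3, and 5, base == 8; tex 3 is remapped to
 * 8, tex 5 to 9, and astc_srgb.orig_idx becomes {3, 5} with count == 2.
 */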

static void
fixup_binning_pass(struct ir3_context *ctx)
{
	struct ir3_shader_variant *so = ctx->so;
	struct ir3 *ir = ctx->ir;
	unsigned i, j;

	for (i = 0, j = 0; i < so->outputs_count; i++) {
		unsigned slot = so->outputs[i].slot;

		/* throw away everything but first position/psize */
		if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
			if (i != j) {
				so->outputs[j] = so->outputs[i];
				ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0];
				ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1];
				ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2];
				ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3];
			}
			j++;
		}
	}
	so->outputs_count = j;
	ir->noutputs = j * 4;
}

int
ir3_compile_shader_nir(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so)
{
	struct ir3_context *ctx;
	struct ir3 *ir;
	struct ir3_instruction **inputs;
	unsigned i, actual_in, inloc;
	int ret = 0, max_bary;

	assert(!so->ir);

	ctx = ir3_context_init(compiler, so);
	if (!ctx) {
		DBG("INIT failed!");
		ret = -1;
		goto out;
	}

	emit_instructions(ctx);

	if (ctx->error) {
		DBG("EMIT failed!");
		ret = -1;
		goto out;
	}

	ir = so->ir = ctx->ir;

	/* keep track of the inputs from TGSI perspective.. */
	inputs = ir->inputs;

	/* but fixup actual inputs for frag shader: */
	if (so->type == MESA_SHADER_FRAGMENT)
		fixup_frag_inputs(ctx);

	/* at this point, for binning pass, throw away unneeded outputs: */
	if (so->binning_pass && (ctx->compiler->gpu_id < 600))
		fixup_binning_pass(ctx);

	/* if we want half-precision outputs, mark the output registers
	 * as half:
	 */
	if (so->key.half_precision) {
		for (i = 0; i < ir->noutputs; i++) {
			struct ir3_instruction *out = ir->outputs[i];

			if (!out)
				continue;

			/* if frag shader writes z, that needs to be full precision: */
			if (so->outputs[i/4].slot == FRAG_RESULT_DEPTH)
				continue;

			out->regs[0]->flags |= IR3_REG_HALF;
			/* output could be a fanout (ie. texture fetch output)
			 * in which case we need to propagate the half-reg flag
			 * up to the definer so that RA sees it:
			 */
			if (out->opc == OPC_META_FO) {
				out = out->regs[1]->instr;
				out->regs[0]->flags |= IR3_REG_HALF;
			}

			if (out->opc == OPC_MOV) {
				out->cat1.dst_type = half_type(out->cat1.dst_type);
			}
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("BEFORE CP:\n");
		ir3_print(ir);
	}

	ir3_cp(ir, so);

	/* at this point, for binning pass, throw away unneeded outputs:
	 * Note that for a6xx and later, we do this after ir3_cp to ensure
	 * that the uniform/constant layout for BS and VS matches, so that
	 * we can re-use same VS_CONST state group.
	 */
	if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
		fixup_binning_pass(ctx);

	/* Insert a mov if the same instruction is used for more than one
	 * output, eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
	 */
	for (int i = ir->noutputs - 1; i >= 0; i--) {
		if (!ir->outputs[i])
			continue;
		for (unsigned j = 0; j < i; j++) {
			if (ir->outputs[i] == ir->outputs[j]) {
				ir->outputs[i] =
					ir3_MOV(ir->outputs[i]->block, ir->outputs[i], TYPE_F32);
			}
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("BEFORE GROUPING:\n");
		ir3_print(ir);
	}

	ir3_sched_add_deps(ir);

	/* Group left/right neighbors, inserting mov's where needed to
	 * solve conflicts:
	 */
	ir3_group(ir);

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER GROUPING:\n");
		ir3_print(ir);
	}

	ir3_depth(ir);

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER DEPTH:\n");
		ir3_print(ir);
	}

	/* do Sethi–Ullman numbering before scheduling: */
	ir3_sun(ir);

	ret = ir3_sched(ir);
	if (ret) {
		DBG("SCHED failed!");
		goto out;
	}

	if (compiler->gpu_id >= 600) {
		ir3_a6xx_fixup_atomic_dests(ir, so);
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER SCHED:\n");
		ir3_print(ir);
	}

	ret = ir3_ra(ir, so->type, so->frag_coord, so->frag_face);
	if (ret) {
		DBG("RA failed!");
		goto out;
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER RA:\n");
		ir3_print(ir);
	}

	/* fixup input/outputs: */
	for (i = 0; i < so->outputs_count; i++) {
		/* sometimes we get outputs that don't write the .x coord, like:
		 *
		 *   decl_var shader_out INTERP_MODE_NONE float Color (VARYING_SLOT_VAR9.z, 1, 0)
		 *
		 * Presumably the result of varying packing and then eliminating
		 * some unneeded varyings? Just skip ahead to the first valid
		 * component of the output.
		 */
		for (unsigned j = 0; j < 4; j++) {
			struct ir3_instruction *instr = ir->outputs[(i*4) + j];
			if (instr) {
				so->outputs[i].regid = instr->regs[0]->num;
				break;
			}
		}
	}

	/* Note that some or all channels of an input may be unused: */
	actual_in = 0;
	inloc = 0;
	for (i = 0; i < so->inputs_count; i++) {
		unsigned j, reg = regid(63,0), compmask = 0, maxcomp = 0;
		so->inputs[i].ncomp = 0;
		so->inputs[i].inloc = inloc;
		for (j = 0; j < 4; j++) {
			struct ir3_instruction *in = inputs[(i*4) + j];
			if (in && !(in->flags & IR3_INSTR_UNUSED)) {
				compmask |= (1 << j);
				reg = in->regs[0]->num - j;
				actual_in++;
				so->inputs[i].ncomp++;
				if ((so->type == MESA_SHADER_FRAGMENT) && so->inputs[i].bary) {
					/* assign inloc: */
					assert(in->regs[1]->flags & IR3_REG_IMMED);
					in->regs[1]->iim_val = inloc + j;
					maxcomp = j + 1;
				}
			}
		}
		if ((so->type == MESA_SHADER_FRAGMENT) && compmask && so->inputs[i].bary) {
			so->varying_in++;
			so->inputs[i].compmask = (1 << maxcomp) - 1;
			inloc += maxcomp;
		} else if (!so->inputs[i].sysval) {
			so->inputs[i].compmask = compmask;
		}
		so->inputs[i].regid = reg;
	}
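
	/* Worked example for the loop above (illustrative): an FS vec3 varying
	 * read in .xyz gets compmask 0x7, its bary.f srcs get inloc, inloc+1
	 * and inloc+2, and inloc then advances by maxcomp == 3 for the next
	 * varying.
	 */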

	if (ctx->astc_srgb)
		fixup_astc_srgb(ctx);

	/* We need to do legalize after the "bary.f" offsets (inloc) have
	 * been assigned (in the frag shader case).
	 */
	ir3_legalize(ir, &so->has_ssbo, &max_bary);

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER LEGALIZE:\n");
		ir3_print(ir);
	}

	so->branchstack = ctx->max_stack;

	/* Note that actual_in counts inputs that are not bary.f'd for FS: */
	if (so->type == MESA_SHADER_VERTEX)
		so->total_in = actual_in;
	else
		so->total_in = max_bary + 1;

	so->max_sun = ir->max_sun;

out:
	if (ret) {
		if (so->ir)
			ir3_destroy(so->ir);
		so->ir = NULL;
	}
	ir3_context_free(ctx);

	return ret;
}