/*
 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdarg.h>

#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_math.h"

#include "ir3_compiler.h"
#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

#include "instr-a3xx.h"
#include "ir3.h"
#include "ir3_context.h"

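/* Create a mov that uses (relative) a0.x addressing to read a single
 * component, at offset n, out of a collect'd array of size arrsz:
 */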
static struct ir3_instruction *
create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
		struct ir3_instruction *address, struct ir3_instruction *collect)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *src;

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	ir3_reg_create(mov, 0, 0);
	src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV);
	src->instr = collect;
	src->size = arrsz;
	src->array.offset = n;

	ir3_instr_set_address(mov, address);

	return mov;
}

static struct ir3_instruction *
create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask)
{
	struct ir3_instruction *in;

	in = ir3_instr_create(ctx->in_block, OPC_META_INPUT);
	in->input.sysval = ~0;
	ir3_reg_create(in, n, 0);

	in->regs[0]->wrmask = compmask;

	return in;
}

static struct ir3_instruction *
create_input(struct ir3_context *ctx, unsigned n)
{
	return create_input_compmask(ctx, n, 0x1);
}

static struct ir3_instruction *
create_frag_input(struct ir3_context *ctx, bool use_ldlv, unsigned n)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *instr;
	/* packed inloc is fixed up later: */
	struct ir3_instruction *inloc = create_immed(block, n);

	if (use_ldlv) {
		instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
		instr->cat6.type = TYPE_U32;
		instr->cat6.iim_val = 1;
	} else {
		instr = ir3_BARY_F(block, inloc, 0, ctx->ij_pixel, 0);
		instr->regs[2]->wrmask = 0x3;
	}

	return instr;
}

static struct ir3_instruction *
create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
{
	/* first four vec4 sysval's reserved for UBOs: */
	/* NOTE: dp is in scalar, but there can be >4 dp components: */
	struct ir3_const_state *const_state = &ctx->so->shader->const_state;
	unsigned n = const_state->offsets.driver_param;
	unsigned r = regid(n + dp / 4, dp % 4);
	return create_uniform(ctx->block, r);
}
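
/* e.g. with driver_param base n, the scalar param dp == 6 lands in const
 * vec4 (n + 1), component .z, ie. regid(n + 1, 2).
 */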

/*
 * Adreno uses uint rather than having dedicated bool type,
 * which (potentially) requires some conversion, in particular
 * when using output of a bool instr to int input, or vice
 * versa.
 *
 *         | Adreno  |  NIR  |
 *  -------+---------+-------+
 *   true  |    1    |  ~0   |
 *   false |    0    |   0   |
 *
 * To convert from an adreno bool (uint) to nir, use:
 *
 *    absneg.s dst, (neg)src
 *
 * To convert back in the other direction:
 *
 *    absneg.s dst, (abs)src
 *
 * The CP step can clean up the absneg.s that cancel each other
 * out, and with a slight bit of extra cleverness (to recognize
 * the instructions which produce either a 0 or 1) can eliminate
 * the absneg.s's completely when an instruction that wants
 * 0/1 consumes the result.  For example, when a nir 'bcsel'
 * consumes the result of 'feq'.  So we should be able to get by
 * without a boolean resolve step, and without incurring any
 * extra penalty in instruction count.
 */
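/* Worked example of the two conversions above, in signed arithmetic:
 *
 *   nir -> native:  absneg.s dst, (abs)src:  |~0| = |-1| = 1,  |0| = 0
 *   native -> nir:  absneg.s dst, (neg)src:  -(1) = -1 = ~0,   -(0) = 0
 */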

/* NIR bool -> native (adreno): */
static struct ir3_instruction *
ir3_b2n(struct ir3_block *block, struct ir3_instruction *instr)
{
	return ir3_ABSNEG_S(block, instr, IR3_REG_SABS);
}

/* native (adreno) -> NIR bool: */
static struct ir3_instruction *
ir3_n2b(struct ir3_block *block, struct ir3_instruction *instr)
{
	return ir3_ABSNEG_S(block, instr, IR3_REG_SNEG);
}

/*
 * alu/sfu instructions:
 */

static struct ir3_instruction *
create_cov(struct ir3_context *ctx, struct ir3_instruction *src,
		unsigned src_bitsize, nir_op op)
{
	type_t src_type, dst_type;

	switch (op) {
	case nir_op_f2f32:
	case nir_op_f2f16_rtne:
	case nir_op_f2f16_rtz:
	case nir_op_f2f16:
	case nir_op_f2i32:
	case nir_op_f2i16:
	case nir_op_f2i8:
	case nir_op_f2u32:
	case nir_op_f2u16:
	case nir_op_f2u8:
		switch (src_bitsize) {
		case 32:
			src_type = TYPE_F32;
			break;
		case 16:
			src_type = TYPE_F16;
			break;
		default:
			ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
		}
		break;

	case nir_op_i2f32:
	case nir_op_i2f16:
	case nir_op_i2i32:
	case nir_op_i2i16:
	case nir_op_i2i8:
		switch (src_bitsize) {
		case 32:
			src_type = TYPE_S32;
			break;
		case 16:
			src_type = TYPE_S16;
			break;
		case 8:
			src_type = TYPE_S8;
			break;
		default:
			ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
		}
		break;

	case nir_op_u2f32:
	case nir_op_u2f16:
	case nir_op_u2u32:
	case nir_op_u2u16:
	case nir_op_u2u8:
		switch (src_bitsize) {
		case 32:
			src_type = TYPE_U32;
			break;
		case 16:
			src_type = TYPE_U16;
			break;
		case 8:
			src_type = TYPE_U8;
			break;
		default:
			ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
		}
		break;

	default:
		ir3_context_error(ctx, "invalid conversion op: %u", op);
	}

	switch (op) {
	case nir_op_f2f32:
	case nir_op_i2f32:
	case nir_op_u2f32:
		dst_type = TYPE_F32;
		break;

	case nir_op_f2f16_rtne:
	case nir_op_f2f16_rtz:
	case nir_op_f2f16:
		/* TODO how to handle rounding mode? */
	case nir_op_i2f16:
	case nir_op_u2f16:
		dst_type = TYPE_F16;
		break;

	case nir_op_f2i32:
	case nir_op_i2i32:
		dst_type = TYPE_S32;
		break;

	case nir_op_f2i16:
	case nir_op_i2i16:
		dst_type = TYPE_S16;
		break;

	case nir_op_f2i8:
	case nir_op_i2i8:
		dst_type = TYPE_S8;
		break;

	case nir_op_f2u32:
	case nir_op_u2u32:
		dst_type = TYPE_U32;
		break;

	case nir_op_f2u16:
	case nir_op_u2u16:
		dst_type = TYPE_U16;
		break;

	case nir_op_f2u8:
	case nir_op_u2u8:
		dst_type = TYPE_U8;
		break;

	default:
		ir3_context_error(ctx, "invalid conversion op: %u", op);
	}

	return ir3_COV(ctx->block, src, src_type, dst_type);
}
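
/* e.g. nir_op_f2u16 with a 32-bit src resolves to (TYPE_F32 -> TYPE_U16),
 * ie. a cov.f32u16 instruction.
 */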

static void
emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
{
	const nir_op_info *info = &nir_op_infos[alu->op];
	struct ir3_instruction **dst, *src[info->num_inputs];
	unsigned bs[info->num_inputs];     /* bit size */
	struct ir3_block *b = ctx->block;
	unsigned dst_sz, wrmask;
	type_t dst_type = nir_dest_bit_size(alu->dest.dest) < 32 ?
			TYPE_U16 : TYPE_U32;

	if (alu->dest.dest.is_ssa) {
		dst_sz = alu->dest.dest.ssa.num_components;
		wrmask = (1 << dst_sz) - 1;
	} else {
		dst_sz = alu->dest.dest.reg.reg->num_components;
		wrmask = alu->dest.write_mask;
	}

	dst = ir3_get_dst(ctx, &alu->dest.dest, dst_sz);

	/* Vectors are special in that they have non-scalarized writemasks,
	 * and just take the first swizzle channel for each argument in
	 * order into each writemask channel.
	 */
	if ((alu->op == nir_op_vec2) ||
			(alu->op == nir_op_vec3) ||
			(alu->op == nir_op_vec4)) {

		for (int i = 0; i < info->num_inputs; i++) {
			nir_alu_src *asrc = &alu->src[i];

			compile_assert(ctx, !asrc->abs);
			compile_assert(ctx, !asrc->negate);

			src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[0]];
			if (!src[i])
				src[i] = create_immed_typed(ctx->block, 0, dst_type);
			dst[i] = ir3_MOV(b, src[i], dst_type);
		}

		ir3_put_dst(ctx, &alu->dest.dest);
		return;
	}

	/* We also get mov's with more than one component, so handle
	 * those specially:
	 */
	if (alu->op == nir_op_mov) {
		nir_alu_src *asrc = &alu->src[0];
		struct ir3_instruction *const *src0 = ir3_get_src(ctx, &asrc->src);

		for (unsigned i = 0; i < dst_sz; i++) {
			if (wrmask & (1 << i)) {
				dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], dst_type);
			} else {
				dst[i] = NULL;
			}
		}

		ir3_put_dst(ctx, &alu->dest.dest);
		return;
	}

	/* General case: We can just grab the one used channel per src. */
	for (int i = 0; i < info->num_inputs; i++) {
		unsigned chan = ffs(alu->dest.write_mask) - 1;
		nir_alu_src *asrc = &alu->src[i];

		compile_assert(ctx, !asrc->abs);
		compile_assert(ctx, !asrc->negate);

		src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[chan]];
		bs[i] = nir_src_bit_size(asrc->src);

		compile_assert(ctx, src[i]);
	}

	switch (alu->op) {
	case nir_op_f2f32:
	case nir_op_f2f16_rtne:
	case nir_op_f2f16_rtz:
	case nir_op_f2f16:
	case nir_op_f2i32:
	case nir_op_f2i16:
	case nir_op_f2i8:
	case nir_op_f2u32:
	case nir_op_f2u16:
	case nir_op_f2u8:
	case nir_op_i2f32:
	case nir_op_i2f16:
	case nir_op_i2i32:
	case nir_op_i2i16:
	case nir_op_i2i8:
	case nir_op_u2f32:
	case nir_op_u2f16:
	case nir_op_u2u32:
	case nir_op_u2u16:
	case nir_op_u2u8:
		dst[0] = create_cov(ctx, src[0], bs[0], alu->op);
		break;
	case nir_op_fquantize2f16:
		dst[0] = create_cov(ctx,
				create_cov(ctx, src[0], 32, nir_op_f2f16),
				16, nir_op_f2f32);
		break;
	case nir_op_f2b32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_b2f16:
		dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F16);
		break;
	case nir_op_b2f32:
		dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
		break;
	case nir_op_b2i8:
	case nir_op_b2i16:
	case nir_op_b2i32:
		dst[0] = ir3_b2n(b, src[0]);
		break;
	case nir_op_i2b32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;

	case nir_op_fneg:
		dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FNEG);
		break;
	case nir_op_fabs:
		dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FABS);
		break;
	case nir_op_fmax:
		dst[0] = ir3_MAX_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fmin:
		dst[0] = ir3_MIN_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fsat:
		/* if there is just a single use of the src, and it supports
		 * (sat) bit, we can just fold the (sat) flag back to the
		 * src instruction and create a mov.  This is easier for cp
		 * to eliminate.
		 *
		 * TODO probably opc_cat==4 is ok too
		 */
		if (alu->src[0].src.is_ssa &&
				(list_length(&alu->src[0].src.ssa->uses) == 1) &&
				((opc_cat(src[0]->opc) == 2) || (opc_cat(src[0]->opc) == 3))) {
			src[0]->flags |= IR3_INSTR_SAT;
			dst[0] = ir3_MOV(b, src[0], dst_type);
		} else {
			/* otherwise generate a max.f that saturates.. blob does
			 * similar (generating a cat2 mov using max.f)
			 */
			dst[0] = ir3_MAX_F(b, src[0], 0, src[0], 0);
			dst[0]->flags |= IR3_INSTR_SAT;
		}
		break;
	case nir_op_fmul:
		dst[0] = ir3_MUL_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fadd:
		dst[0] = ir3_ADD_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fsub:
		dst[0] = ir3_ADD_F(b, src[0], 0, src[1], IR3_REG_FNEG);
		break;
	case nir_op_ffma:
		dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0);
		break;
	case nir_op_fddx:
	case nir_op_fddx_coarse:
		dst[0] = ir3_DSX(b, src[0], 0);
		dst[0]->cat5.type = TYPE_F32;
		break;
	case nir_op_fddy:
	case nir_op_fddy_coarse:
		dst[0] = ir3_DSY(b, src[0], 0);
		dst[0]->cat5.type = TYPE_F32;
		break;
	case nir_op_flt32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_LT;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_fge32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_feq32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_EQ;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_fne32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_fceil:
		dst[0] = ir3_CEIL_F(b, src[0], 0);
		break;
	case nir_op_ffloor:
		dst[0] = ir3_FLOOR_F(b, src[0], 0);
		break;
	case nir_op_ftrunc:
		dst[0] = ir3_TRUNC_F(b, src[0], 0);
		break;
	case nir_op_fround_even:
		dst[0] = ir3_RNDNE_F(b, src[0], 0);
		break;
	case nir_op_fsign:
		dst[0] = ir3_SIGN_F(b, src[0], 0);
		break;

	case nir_op_fsin:
		dst[0] = ir3_SIN(b, src[0], 0);
		break;
	case nir_op_fcos:
		dst[0] = ir3_COS(b, src[0], 0);
		break;
	case nir_op_frsq:
		dst[0] = ir3_RSQ(b, src[0], 0);
		break;
	case nir_op_frcp:
		dst[0] = ir3_RCP(b, src[0], 0);
		break;
	case nir_op_flog2:
		dst[0] = ir3_LOG2(b, src[0], 0);
		break;
	case nir_op_fexp2:
		dst[0] = ir3_EXP2(b, src[0], 0);
		break;
	case nir_op_fsqrt:
		dst[0] = ir3_SQRT(b, src[0], 0);
		break;

	case nir_op_iabs:
		dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SABS);
		break;
	case nir_op_iadd:
		dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_iand:
		dst[0] = ir3_AND_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_imax:
		dst[0] = ir3_MAX_S(b, src[0], 0, src[1], 0);
		break;
	case nir_op_umax:
		dst[0] = ir3_MAX_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_imin:
		dst[0] = ir3_MIN_S(b, src[0], 0, src[1], 0);
		break;
	case nir_op_umin:
		dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_umul_low:
		dst[0] = ir3_MULL_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_imadsh_mix16:
		dst[0] = ir3_MADSH_M16(b, src[0], 0, src[1], 0, src[2], 0);
		break;
	case nir_op_imad24_ir3:
		dst[0] = ir3_MAD_S24(b, src[0], 0, src[1], 0, src[2], 0);
		break;
	case nir_op_imul24:
		dst[0] = ir3_MUL_S24(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ineg:
		dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
		break;
	case nir_op_inot:
		dst[0] = ir3_NOT_B(b, src[0], 0);
		break;
	case nir_op_ior:
		dst[0] = ir3_OR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ishl:
		dst[0] = ir3_SHL_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ishr:
		dst[0] = ir3_ASHR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_isub:
		dst[0] = ir3_SUB_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ixor:
		dst[0] = ir3_XOR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ushr:
		dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ilt32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_LT;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ige32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ieq32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_EQ;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ine32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ult32:
		dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_LT;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_uge32:
		dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;

	case nir_op_b32csel: {
		struct ir3_instruction *cond = ir3_b2n(b, src[0]);
		compile_assert(ctx, bs[1] == bs[2]);
		/* the boolean condition is 32b even if src[1] and src[2] are
		 * half-precision, but sel.b16 wants all three src's to be the
		 * same type.
		 */
		if (bs[1] < 32)
			cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16);
		dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
		break;
	}
	case nir_op_bit_count: {
		// TODO, we need to do this 16b at a time on a5xx+a6xx.. need to
		// double check on earlier gen's.  Once half-precision support is
		// in place, this should probably move to a NIR lowering pass:
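		// e.g. src = 0x00030001: hi = 0x0003 (two bits set),
		// lo = 0x0001 (one bit set), so dst = 2 + 1 = 3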
		struct ir3_instruction *hi, *lo;

		hi = ir3_COV(b, ir3_SHR_B(b, src[0], 0, create_immed(b, 16), 0),
				TYPE_U32, TYPE_U16);
		lo = ir3_COV(b, src[0], TYPE_U32, TYPE_U16);

		hi = ir3_CBITS_B(b, hi, 0);
		lo = ir3_CBITS_B(b, lo, 0);

		// TODO maybe the builders should default to making dst half-precision
		// if the src's were half precision, to make this less awkward.. otoh
		// we should probably just do this lowering in NIR.
		hi->regs[0]->flags |= IR3_REG_HALF;
		lo->regs[0]->flags |= IR3_REG_HALF;

		dst[0] = ir3_ADD_S(b, hi, 0, lo, 0);
		dst[0]->regs[0]->flags |= IR3_REG_HALF;
		dst[0] = ir3_COV(b, dst[0], TYPE_U16, TYPE_U32);
		break;
	}
	case nir_op_ifind_msb: {
		struct ir3_instruction *cmp;
		dst[0] = ir3_CLZ_S(b, src[0], 0);
		cmp = ir3_CMPS_S(b, dst[0], 0, create_immed(b, 0), 0);
		cmp->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_SEL_B32(b,
				ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
				cmp, 0, dst[0], 0);
		break;
	}
	case nir_op_ufind_msb:
		dst[0] = ir3_CLZ_B(b, src[0], 0);
		dst[0] = ir3_SEL_B32(b,
				ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
				src[0], 0, dst[0], 0);
		break;
	case nir_op_find_lsb:
		dst[0] = ir3_BFREV_B(b, src[0], 0);
		dst[0] = ir3_CLZ_B(b, dst[0], 0);
		break;
	case nir_op_bitfield_reverse:
		dst[0] = ir3_BFREV_B(b, src[0], 0);
		break;

	default:
		ir3_context_error(ctx, "Unhandled ALU op: %s\n",
				nir_op_infos[alu->op].name);
		break;
	}

	ir3_put_dst(ctx, &alu->dest.dest);
}

/* handles direct/indirect UBO reads: */
static void
emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
	/* UBO addresses are the first driver params, but subtract 2 here to
	 * account for nir_lower_uniforms_to_ubo rebasing the UBOs such that UBO 0
	 * is the uniforms: */
	struct ir3_const_state *const_state = &ctx->so->shader->const_state;
	unsigned ubo = regid(const_state->offsets.ubo, 0) - 2;
	const unsigned ptrsz = ir3_pointer_size(ctx->compiler);

	int off = 0;

	/* First src is ubo index, which could either be an immed or not: */
	src0 = ir3_get_src(ctx, &intr->src[0])[0];
	if (is_same_type_mov(src0) &&
			(src0->regs[1]->flags & IR3_REG_IMMED)) {
		base_lo = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz));
		base_hi = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
	} else {
		base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, ptrsz));
		base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, ptrsz));

		/* NOTE: since relative addressing is used, make sure constlen is
		 * at least big enough to cover all the UBO addresses, since the
		 * assembler won't know what the max address reg is.
		 */
		ctx->so->constlen = MAX2(ctx->so->constlen,
				const_state->offsets.ubo + (ctx->s->info.num_ubos * ptrsz));
	}

	/* note: on 32bit gpu's base_hi is ignored and DCE'd */
	addr = base_lo;

	if (nir_src_is_const(intr->src[1])) {
		off += nir_src_as_uint(intr->src[1]);
	} else {
		/* For load_ubo_indirect, second src is indirect offset: */
		src1 = ir3_get_src(ctx, &intr->src[1])[0];

		/* and add offset to addr: */
		addr = ir3_ADD_S(b, addr, 0, src1, 0);
	}

	/* if offset is too large to encode in the ldg, split it out: */
	if ((off + (intr->num_components * 4)) > 1024) {
		/* split out the minimal amount to improve the odds that
		 * cp can fit the immediate in the add.s instruction:
		 */
		unsigned off2 = off + (intr->num_components * 4) - 1024;
		addr = ir3_ADD_S(b, addr, 0, create_immed(b, off2), 0);
		off -= off2;
	}

	if (ptrsz == 2) {
		struct ir3_instruction *carry;

		/* handle 32b rollover, ie:
		 *   if (addr < base_lo)
		 *      base_hi++
		 */
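		/* cmps.u produces the native boolean 1/0 (see note near top of
		 * file), so the comparison result can be added to base_hi
		 * directly as the carry bit:
		 */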
		carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
		carry->cat2.condition = IR3_COND_LT;
		base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);

		addr = ir3_create_collect(ctx, (struct ir3_instruction*[]){ addr, base_hi }, 2);
	}

	for (int i = 0; i < intr->num_components; i++) {
		struct ir3_instruction *load =
				ir3_LDG(b, addr, 0, create_immed(b, 1), 0, /* num components */
						create_immed(b, off + i * 4), 0);
		load->cat6.type = TYPE_U32;
		dst[i] = load;
	}
}

/* src[] = { block_index } */
static void
emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	/* SSBO size stored as a const starting at ssbo_sizes: */
	struct ir3_const_state *const_state = &ctx->so->shader->const_state;
	unsigned blk_idx = nir_src_as_uint(intr->src[0]);
	unsigned idx = regid(const_state->offsets.ssbo_sizes, 0) +
		const_state->ssbo_size.off[blk_idx];

	debug_assert(const_state->ssbo_size.mask & (1 << blk_idx));

	dst[0] = create_uniform(ctx->block, idx);
}

/* src[] = { offset }. const_index[] = { base } */
static void
emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *ldl, *offset;
	unsigned base;

	offset = ir3_get_src(ctx, &intr->src[0])[0];
	base   = nir_intrinsic_base(intr);

	ldl = ir3_LDL(b, offset, 0,
			create_immed(b, intr->num_components), 0,
			create_immed(b, base), 0);

	ldl->cat6.type = utype_dst(intr->dest);
	ldl->regs[0]->wrmask = MASK(intr->num_components);

	ldl->barrier_class = IR3_BARRIER_SHARED_R;
	ldl->barrier_conflict = IR3_BARRIER_SHARED_W;

	ir3_split_dest(b, dst, ldl, 0, intr->num_components);
}

/* src[] = { value, offset }. const_index[] = { base, write_mask } */
static void
emit_intrinsic_store_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *stl, *offset;
	struct ir3_instruction * const *value;
	unsigned base, wrmask;

	value  = ir3_get_src(ctx, &intr->src[0]);
	offset = ir3_get_src(ctx, &intr->src[1])[0];

	base   = nir_intrinsic_base(intr);
	wrmask = nir_intrinsic_write_mask(intr);

	/* Combine groups of consecutive enabled channels in one write
	 * message. We use ffs to find the first enabled channel and then ffs on
	 * the bit-inverse, down-shifted writemask to determine the length of
	 * the block of enabled bits.
	 *
	 * (trick stolen from i965's fs_visitor::nir_emit_cs_intrinsic())
	 */
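	/* e.g. wrmask = 0b1011: the first pass writes components 0..1
	 * (first_component = 0, length = 2), leaving wrmask = 0b1000; the
	 * second pass writes component 3 (first_component = 3, length = 1).
	 */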
	while (wrmask) {
		unsigned first_component = ffs(wrmask) - 1;
		unsigned length = ffs(~(wrmask >> first_component)) - 1;

		stl = ir3_STL(b, offset, 0,
			ir3_create_collect(ctx, &value[first_component], length), 0,
			create_immed(b, length), 0);
		stl->cat6.dst_offset = first_component + base;
		stl->cat6.type = utype_src(intr->src[0]);
		stl->barrier_class = IR3_BARRIER_SHARED_W;
		stl->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;

		array_insert(b, b->keeps, stl);

		/* Clear the bits in the writemask that we just wrote, then try
		 * again to see if more channels are left.
		 */
		wrmask &= (15 << (first_component + length));
	}
}

/* src[] = { offset }. const_index[] = { base } */
static void
emit_intrinsic_load_shared_ir3(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *load, *offset;
	unsigned base;

	offset = ir3_get_src(ctx, &intr->src[0])[0];
	base   = nir_intrinsic_base(intr);

	load = ir3_LDLW(b, offset, 0,
			create_immed(b, intr->num_components), 0,
			create_immed(b, base), 0);

	load->cat6.type = utype_dst(intr->dest);
	load->regs[0]->wrmask = MASK(intr->num_components);

	load->barrier_class = IR3_BARRIER_SHARED_R;
	load->barrier_conflict = IR3_BARRIER_SHARED_W;

	ir3_split_dest(b, dst, load, 0, intr->num_components);
}

/* src[] = { value, offset }. const_index[] = { base, write_mask } */
static void
emit_intrinsic_store_shared_ir3(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *store, *offset;
	struct ir3_instruction * const *value;
	unsigned base, wrmask;

	value  = ir3_get_src(ctx, &intr->src[0]);
	offset = ir3_get_src(ctx, &intr->src[1])[0];

	base   = nir_intrinsic_base(intr);
	wrmask = nir_intrinsic_write_mask(intr);

	/* Combine groups of consecutive enabled channels in one write
	 * message. We use ffs to find the first enabled channel and then ffs on
	 * the bit-inverse, down-shifted writemask to determine the length of
	 * the block of enabled bits.
	 *
	 * (trick stolen from i965's fs_visitor::nir_emit_cs_intrinsic())
	 */
	while (wrmask) {
		unsigned first_component = ffs(wrmask) - 1;
		unsigned length = ffs(~(wrmask >> first_component)) - 1;

		store = ir3_STLW(b, offset, 0,
			ir3_create_collect(ctx, &value[first_component], length), 0,
			create_immed(b, length), 0);

		store->cat6.dst_offset = first_component + base;
		store->cat6.type = utype_src(intr->src[0]);
		store->barrier_class = IR3_BARRIER_SHARED_W;
		store->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;

		array_insert(b, b->keeps, store);

		/* Clear the bits in the writemask that we just wrote, then try
		 * again to see if more channels are left.
		 */
		wrmask &= (15 << (first_component + length));
	}
}

/*
 * CS shared variable atomic intrinsics
 *
 * All of the shared variable atomic memory operations read a value from
 * memory, compute a new value using one of the operations below, write the
 * new value to memory, and return the original value read.
 *
 * All operations take 2 sources except CompSwap that takes 3. These
 * sources represent:
 *
 * 0: The offset into the shared variable storage region that the atomic
 *    operation will operate on.
 * 1: The data parameter to the atomic function (i.e. the value to add
 *    in shared_atomic_add, etc).
 * 2: For CompSwap only: the second data parameter.
 */
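/* e.g. if shared memory at 'offset' holds 5, shared_atomic_add(offset, 3)
 * leaves 8 in memory and returns the original 5.
 */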
static struct ir3_instruction *
emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *atomic, *src0, *src1;
	type_t type = TYPE_U32;

	src0 = ir3_get_src(ctx, &intr->src[0])[0]; /* offset */
	src1 = ir3_get_src(ctx, &intr->src[1])[0]; /* value */

	switch (intr->intrinsic) {
	case nir_intrinsic_shared_atomic_add:
		atomic = ir3_ATOMIC_ADD(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_imin:
		atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_shared_atomic_umin:
		atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_imax:
		atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_shared_atomic_umax:
		atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_and:
		atomic = ir3_ATOMIC_AND(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_or:
		atomic = ir3_ATOMIC_OR(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_xor:
		atomic = ir3_ATOMIC_XOR(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_exchange:
		atomic = ir3_ATOMIC_XCHG(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_comp_swap:
		/* for cmpxchg, src1 is [ui]vec2(data, compare): */
		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
			ir3_get_src(ctx, &intr->src[2])[0],
			src1,
		}, 2);
		atomic = ir3_ATOMIC_CMPXCHG(b, src0, 0, src1, 0);
		break;
	default:
		unreachable("boo");
	}

	atomic->cat6.iim_val = 1;
	atomic->cat6.d = 1;
	atomic->cat6.type = type;
	atomic->barrier_class = IR3_BARRIER_SHARED_W;
	atomic->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;

	/* even if nothing consumes the result, we can't DCE the instruction: */
	array_insert(b, b->keeps, atomic);

	return atomic;
}

/* TODO handle actual indirect/dynamic case.. which is going to be weird
 * to handle with the image_mapping table..
 */
static struct ir3_instruction *
get_image_samp_tex_src(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
	unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot);
	struct ir3_instruction *texture, *sampler;

	texture = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
	sampler = create_immed_typed(ctx->block, tex_idx, TYPE_U16);

	return ir3_create_collect(ctx, (struct ir3_instruction*[]){
		sampler,
		texture,
	}, 2);
}

/* src[] = { deref, coord, sample_index }. const_index[] = {} */
static void
emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
	struct ir3_instruction *sam;
	struct ir3_instruction * const *src0 = ir3_get_src(ctx, &intr->src[1]);
	struct ir3_instruction *coords[4];
	unsigned flags, ncoords = ir3_get_image_coords(var, &flags);
	type_t type = ir3_get_image_type(var);

	/* hmm, this seems a bit odd, but it is what blob does and (at least
	 * a5xx) just faults on bogus addresses otherwise:
	 */
	if (flags & IR3_INSTR_3D) {
		flags &= ~IR3_INSTR_3D;
		flags |= IR3_INSTR_A;
	}

	for (unsigned i = 0; i < ncoords; i++)
		coords[i] = src0[i];

	if (ncoords == 1)
		coords[ncoords++] = create_immed(b, 0);

	sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags,
			samp_tex, ir3_create_collect(ctx, coords, ncoords), NULL);

	sam->barrier_class = IR3_BARRIER_IMAGE_R;
	sam->barrier_conflict = IR3_BARRIER_IMAGE_W;

	ir3_split_dest(b, dst, sam, 0, 4);
}

static void
emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
	struct ir3_instruction *sam, *lod;
	unsigned flags, ncoords = ir3_get_image_coords(var, &flags);

	lod = create_immed(b, 0);
	sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
			samp_tex, lod, NULL);

	/* Array size actually ends up in .w rather than .z.  This doesn't
	 * matter for miplevel 0, but for higher mips the value in z is
	 * minified whereas w stays.  Also, the value in TEX_CONST_3_DEPTH is
	 * returned, which means that we have to add 1 to it for arrays for
	 * a3xx.
	 *
	 * Note: use a temporary dst and then copy, since the size of the dst
	 * array that is passed in is based on nir's understanding of the
	 * result size, not the hardware's.
	 */
	struct ir3_instruction *tmp[4];

	ir3_split_dest(b, tmp, sam, 0, 4);

	/* get_size instruction returns size in bytes instead of texels
	 * for imageBuffer, so we need to divide it by the pixel size
	 * of the image format.
	 *
	 * TODO: This is at least true on a5xx. Check other gens.
	 */
	enum glsl_sampler_dim dim =
		glsl_get_sampler_dim(glsl_without_array(var->type));
	if (dim == GLSL_SAMPLER_DIM_BUF) {
		/* Since all the possible values the divisor can take are
		 * power-of-two (4, 8, or 16), the division is implemented
		 * as a shift-right.
		 * During shader setup, the log2 of the image format's
		 * bytes-per-pixel should have been emitted in 2nd slot of
		 * image_dims. See ir3_shader::emit_image_dims().
		 */
		struct ir3_const_state *const_state = &ctx->so->shader->const_state;
		unsigned cb = regid(const_state->offsets.image_dims, 0) +
			const_state->image_dims.off[var->data.driver_location];
		struct ir3_instruction *aux = create_uniform(b, cb + 1);

		tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0);
	}

	for (unsigned i = 0; i < ncoords; i++)
		dst[i] = tmp[i];

	if (flags & IR3_INSTR_A) {
		if (ctx->compiler->levels_add_one) {
			dst[ncoords-1] = ir3_ADD_U(b, tmp[3], 0, create_immed(b, 1), 0);
		} else {
			dst[ncoords-1] = ir3_MOV(b, tmp[3], TYPE_U32);
		}
	}
}

static void
emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *barrier;

	switch (intr->intrinsic) {
	case nir_intrinsic_barrier:
		barrier = ir3_BAR(b);
		barrier->cat7.g = true;
		barrier->cat7.l = true;
		barrier->flags = IR3_INSTR_SS | IR3_INSTR_SY;
		barrier->barrier_class = IR3_BARRIER_EVERYTHING;
		break;
	case nir_intrinsic_memory_barrier:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->cat7.l = true;
		barrier->barrier_class = IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_W;
		barrier->barrier_conflict =
			IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
		break;
	case nir_intrinsic_memory_barrier_atomic_counter:
	case nir_intrinsic_memory_barrier_buffer:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_BUFFER_W;
		barrier->barrier_conflict = IR3_BARRIER_BUFFER_R |
			IR3_BARRIER_BUFFER_W;
		break;
	case nir_intrinsic_memory_barrier_image:
		// TODO double check if this should have .g set
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_IMAGE_W;
		barrier->barrier_conflict = IR3_BARRIER_IMAGE_R |
			IR3_BARRIER_IMAGE_W;
		break;
	case nir_intrinsic_memory_barrier_shared:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.l = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_SHARED_W;
		barrier->barrier_conflict = IR3_BARRIER_SHARED_R |
			IR3_BARRIER_SHARED_W;
		break;
	case nir_intrinsic_group_memory_barrier:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.l = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_SHARED_W |
			IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_W;
		barrier->barrier_conflict =
			IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W |
			IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
		break;
	default:
		unreachable("boo");
	}

	/* make sure barrier doesn't get DCE'd */
	array_insert(b, b->keeps, barrier);
}

static void add_sysval_input_compmask(struct ir3_context *ctx,
		gl_system_value slot, unsigned compmask,
		struct ir3_instruction *instr)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned r = regid(so->inputs_count, 0);
	unsigned n = so->inputs_count++;

	assert(instr->opc == OPC_META_INPUT);
	instr->input.sysval = slot;

	so->inputs[n].sysval = true;
	so->inputs[n].slot = slot;
	so->inputs[n].compmask = compmask;
	so->inputs[n].regid = r;
	so->inputs[n].interpolate = INTERP_MODE_FLAT;
	so->total_in++;

	ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1);
	ctx->ir->inputs[r] = instr;
}

static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot,
		struct ir3_instruction *instr)
{
	add_sysval_input_compmask(ctx, slot, 0x1, instr);
}

static struct ir3_instruction *
get_barycentric_centroid(struct ir3_context *ctx)
{
	if (!ctx->ij_centroid) {
		struct ir3_instruction *xy[2];
		struct ir3_instruction *ij;

		ij = create_input_compmask(ctx, 0, 0x3);
		ir3_split_dest(ctx->block, xy, ij, 0, 2);

		ctx->ij_centroid = ir3_create_collect(ctx, xy, 2);

		add_sysval_input_compmask(ctx,
				SYSTEM_VALUE_BARYCENTRIC_CENTROID,
				0x3, ij);
	}

	return ctx->ij_centroid;
}

static struct ir3_instruction *
get_barycentric_sample(struct ir3_context *ctx)
{
	if (!ctx->ij_sample) {
		struct ir3_instruction *xy[2];
		struct ir3_instruction *ij;

		ij = create_input_compmask(ctx, 0, 0x3);
		ir3_split_dest(ctx->block, xy, ij, 0, 2);

		ctx->ij_sample = ir3_create_collect(ctx, xy, 2);

		add_sysval_input_compmask(ctx,
				SYSTEM_VALUE_BARYCENTRIC_SAMPLE,
				0x3, ij);
	}

	return ctx->ij_sample;
}

static struct ir3_instruction *
get_barycentric_pixel(struct ir3_context *ctx)
{
	/* TODO when tgsi_to_nir supports "new-style" FS inputs switch
	 * this to create ij_pixel only on demand:
	 */
	return ctx->ij_pixel;
}

static struct ir3_instruction *
get_frag_coord(struct ir3_context *ctx)
{
	if (!ctx->frag_coord) {
		struct ir3_block *b = ctx->block;
		struct ir3_instruction *xyzw[4];
		struct ir3_instruction *hw_frag_coord;

		hw_frag_coord = create_input_compmask(ctx, 0, 0xf);
		ir3_split_dest(ctx->block, xyzw, hw_frag_coord, 0, 4);

		/* for frag_coord.xy, we get unsigned values.. we need
		 * to subtract (integer) 8 and divide by 16 (right-
		 * shift by 4) then convert to float:
		 *
		 *    sub.s tmp, src, 8
		 *    shr.b tmp, tmp, 4
		 *    mov.u32f32 dst, tmp
		 *
		 */
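		/* ie. inverting the transform above: a fragment at pixel x = 5
		 * arrives as 5 * 16 + 8 == 88, and (88 - 8) >> 4 == 5.
		 */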
		for (int i = 0; i < 2; i++) {
			xyzw[i] = ir3_SUB_S(b, xyzw[i], 0,
					create_immed(b, 8), 0);
			xyzw[i] = ir3_SHR_B(b, xyzw[i], 0,
					create_immed(b, 4), 0);
			xyzw[i] = ir3_COV(b, xyzw[i], TYPE_U32, TYPE_F32);
		}

		ctx->frag_coord = ir3_create_collect(ctx, xyzw, 4);

		add_sysval_input_compmask(ctx,
				SYSTEM_VALUE_FRAG_COORD,
				0xf, hw_frag_coord);

		ctx->so->frag_coord = true;
	}

	return ctx->frag_coord;
}

static void
emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
	struct ir3_instruction **dst;
	struct ir3_instruction * const *src;
	struct ir3_block *b = ctx->block;
	int idx, comp;

	if (info->has_dest) {
		unsigned n = nir_intrinsic_dest_components(intr);
		dst = ir3_get_dst(ctx, &intr->dest, n);
	} else {
		dst = NULL;
	}

	const unsigned primitive_param = ctx->so->shader->const_state.offsets.primitive_param * 4;
	const unsigned primitive_map = ctx->so->shader->const_state.offsets.primitive_map * 4;

	switch (intr->intrinsic) {
	case nir_intrinsic_load_uniform:
		idx = nir_intrinsic_base(intr);
		if (nir_src_is_const(intr->src[0])) {
			idx += nir_src_as_uint(intr->src[0]);
			for (int i = 0; i < intr->num_components; i++) {
				dst[i] = create_uniform_typed(b, idx + i,
					nir_dest_bit_size(intr->dest) < 32 ? TYPE_F16 : TYPE_F32);
			}
		} else {
			src = ir3_get_src(ctx, &intr->src[0]);
			for (int i = 0; i < intr->num_components; i++) {
				dst[i] = create_uniform_indirect(b, idx + i,
						ir3_get_addr(ctx, src[0], 1));
			}
			/* NOTE: if relative addressing is used, we set
			 * constlen in the compiler (to worst-case value)
			 * since we don't know in the assembler what the max
			 * addr reg value can be:
			 */
			ctx->so->constlen = MAX2(ctx->so->constlen,
					ctx->so->shader->ubo_state.size / 16);
		}
		break;

	case nir_intrinsic_load_vs_primitive_stride_ir3:
		dst[0] = create_uniform(b, primitive_param + 0);
		break;
	case nir_intrinsic_load_vs_vertex_stride_ir3:
		dst[0] = create_uniform(b, primitive_param + 1);
		break;
	case nir_intrinsic_load_primitive_location_ir3:
		idx = nir_intrinsic_driver_location(intr);
		dst[0] = create_uniform(b, primitive_map + idx);
		break;

	case nir_intrinsic_load_gs_header_ir3:
		dst[0] = ctx->gs_header;
		break;

	case nir_intrinsic_load_primitive_id:
		dst[0] = ctx->primitive_id;
		break;

	case nir_intrinsic_load_ubo:
		emit_intrinsic_load_ubo(ctx, intr, dst);
		break;
	case nir_intrinsic_load_frag_coord:
		ir3_split_dest(b, dst, get_frag_coord(ctx), 0, 4);
		break;
	case nir_intrinsic_load_sample_pos_from_id: {
		/* NOTE: blob seems to always use TYPE_F16 and then cov.f16f32,
		 * but that doesn't seem necessary.
		 */
		struct ir3_instruction *offset =
			ir3_RGETPOS(b, ir3_get_src(ctx, &intr->src[0])[0], 0);
		offset->regs[0]->wrmask = 0x3;
		offset->cat5.type = TYPE_F32;

		ir3_split_dest(b, dst, offset, 0, 2);

		break;
	}
	case nir_intrinsic_load_size_ir3:
		if (!ctx->ij_size) {
			ctx->ij_size = create_input(ctx, 0);

			add_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SIZE,
					ctx->ij_size);
		}
		dst[0] = ctx->ij_size;
		break;
	case nir_intrinsic_load_barycentric_centroid:
		ir3_split_dest(b, dst, get_barycentric_centroid(ctx), 0, 2);
		break;
	case nir_intrinsic_load_barycentric_sample:
		if (ctx->so->key.msaa) {
			ir3_split_dest(b, dst, get_barycentric_sample(ctx), 0, 2);
		} else {
			ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2);
		}
		break;
	case nir_intrinsic_load_barycentric_pixel:
		ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2);
		break;
	case nir_intrinsic_load_interpolated_input:
		idx = nir_intrinsic_base(intr);
		comp = nir_intrinsic_component(intr);
		src = ir3_get_src(ctx, &intr->src[0]);
		if (nir_src_is_const(intr->src[1])) {
			struct ir3_instruction *coord = ir3_create_collect(ctx, src, 2);
			idx += nir_src_as_uint(intr->src[1]);
			for (int i = 0; i < intr->num_components; i++) {
				unsigned inloc = idx * 4 + i + comp;
				if (ctx->so->inputs[idx].bary &&
						!ctx->so->inputs[idx].use_ldlv) {
					dst[i] = ir3_BARY_F(b, create_immed(b, inloc), 0, coord, 0);
				} else {
					/* for non-varyings use the pre-setup input, since
					 * that is easier than mapping things back to a
					 * nir_variable to figure out what it is.
					 */
					dst[i] = ctx->ir->inputs[inloc];
				}
			}
		} else {
			ir3_context_error(ctx, "unhandled");
		}
		break;
	case nir_intrinsic_load_input:
		idx = nir_intrinsic_base(intr);
		comp = nir_intrinsic_component(intr);
		if (nir_src_is_const(intr->src[0])) {
			idx += nir_src_as_uint(intr->src[0]);
			for (int i = 0; i < intr->num_components; i++) {
				unsigned n = idx * 4 + i + comp;
				dst[i] = ctx->ir->inputs[n];
				compile_assert(ctx, ctx->ir->inputs[n]);
			}
		} else {
			src = ir3_get_src(ctx, &intr->src[0]);
			struct ir3_instruction *collect =
					ir3_create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs);
			struct ir3_instruction *addr = ir3_get_addr(ctx, src[0], 4);
			for (int i = 0; i < intr->num_components; i++) {
				unsigned n = idx * 4 + i + comp;
				dst[i] = create_indirect_load(ctx, ctx->ir->ninputs,
						n, addr, collect);
			}
		}
		break;
	/* All SSBO intrinsics should have been lowered by 'lower_io_offsets'
	 * pass and replaced by an ir3-specific version that adds the
	 * dword-offset in the last source.
	 */
	case nir_intrinsic_load_ssbo_ir3:
		ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
		break;
	case nir_intrinsic_store_ssbo_ir3:
		if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
				!ctx->s->info.fs.early_fragment_tests)
			ctx->so->no_earlyz = true;
		ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
		break;
	case nir_intrinsic_get_buffer_size:
		emit_intrinsic_ssbo_size(ctx, intr, dst);
		break;
	case nir_intrinsic_ssbo_atomic_add_ir3:
	case nir_intrinsic_ssbo_atomic_imin_ir3:
	case nir_intrinsic_ssbo_atomic_umin_ir3:
	case nir_intrinsic_ssbo_atomic_imax_ir3:
	case nir_intrinsic_ssbo_atomic_umax_ir3:
	case nir_intrinsic_ssbo_atomic_and_ir3:
	case nir_intrinsic_ssbo_atomic_or_ir3:
	case nir_intrinsic_ssbo_atomic_xor_ir3:
	case nir_intrinsic_ssbo_atomic_exchange_ir3:
	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
		if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
				!ctx->s->info.fs.early_fragment_tests)
			ctx->so->no_earlyz = true;
		dst[0] = ctx->funcs->emit_intrinsic_atomic_ssbo(ctx, intr);
		break;
	case nir_intrinsic_load_shared:
		emit_intrinsic_load_shared(ctx, intr, dst);
		break;
	case nir_intrinsic_store_shared:
		emit_intrinsic_store_shared(ctx, intr);
		break;
	case nir_intrinsic_shared_atomic_add:
	case nir_intrinsic_shared_atomic_imin:
	case nir_intrinsic_shared_atomic_umin:
	case nir_intrinsic_shared_atomic_imax:
	case nir_intrinsic_shared_atomic_umax:
	case nir_intrinsic_shared_atomic_and:
	case nir_intrinsic_shared_atomic_or:
	case nir_intrinsic_shared_atomic_xor:
	case nir_intrinsic_shared_atomic_exchange:
	case nir_intrinsic_shared_atomic_comp_swap:
		dst[0] = emit_intrinsic_atomic_shared(ctx, intr);
		break;
	case nir_intrinsic_image_deref_load:
		emit_intrinsic_load_image(ctx, intr, dst);
		break;
	case nir_intrinsic_image_deref_store:
		if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
				!ctx->s->info.fs.early_fragment_tests)
			ctx->so->no_earlyz = true;
		ctx->funcs->emit_intrinsic_store_image(ctx, intr);
		break;
	case nir_intrinsic_image_deref_size:
		emit_intrinsic_image_size(ctx, intr, dst);
		break;
	case nir_intrinsic_image_deref_atomic_add:
	case nir_intrinsic_image_deref_atomic_imin:
	case nir_intrinsic_image_deref_atomic_umin:
	case nir_intrinsic_image_deref_atomic_imax:
	case nir_intrinsic_image_deref_atomic_umax:
	case nir_intrinsic_image_deref_atomic_and:
	case nir_intrinsic_image_deref_atomic_or:
	case nir_intrinsic_image_deref_atomic_xor:
	case nir_intrinsic_image_deref_atomic_exchange:
	case nir_intrinsic_image_deref_atomic_comp_swap:
		if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
				!ctx->s->info.fs.early_fragment_tests)
			ctx->so->no_earlyz = true;
		dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
		break;
	case nir_intrinsic_barrier:
	case nir_intrinsic_memory_barrier:
	case nir_intrinsic_group_memory_barrier:
	case nir_intrinsic_memory_barrier_atomic_counter:
	case nir_intrinsic_memory_barrier_buffer:
	case nir_intrinsic_memory_barrier_image:
	case nir_intrinsic_memory_barrier_shared:
		emit_intrinsic_barrier(ctx, intr);
		/* note that blk ptr no longer valid, make that obvious: */
		b = NULL;
		break;
	case nir_intrinsic_store_output:
		idx = nir_intrinsic_base(intr);
		comp = nir_intrinsic_component(intr);
		compile_assert(ctx, nir_src_is_const(intr->src[1]));
		idx += nir_src_as_uint(intr->src[1]);

		src = ir3_get_src(ctx, &intr->src[0]);
		for (int i = 0; i < intr->num_components; i++) {
			unsigned n = idx * 4 + i + comp;
			ctx->ir->outputs[n] = src[i];
		}
		break;
	case nir_intrinsic_load_base_vertex:
	case nir_intrinsic_load_first_vertex:
		if (!ctx->basevertex) {
			ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE);
			add_sysval_input(ctx, SYSTEM_VALUE_FIRST_VERTEX, ctx->basevertex);
		}
		dst[0] = ctx->basevertex;
		break;
	case nir_intrinsic_load_vertex_id_zero_base:
	case nir_intrinsic_load_vertex_id:
		if (!ctx->vertex_id) {
			gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ?
				SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
			ctx->vertex_id = create_input(ctx, 0);
			add_sysval_input(ctx, sv, ctx->vertex_id);
		}
		dst[0] = ctx->vertex_id;
		break;
	case nir_intrinsic_load_instance_id:
		if (!ctx->instance_id) {
			ctx->instance_id = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID,
					ctx->instance_id);
		}
		dst[0] = ctx->instance_id;
		break;
	case nir_intrinsic_load_sample_id:
		ctx->so->per_samp = true;
		/* fall-thru */
	case nir_intrinsic_load_sample_id_no_per_sample:
		if (!ctx->samp_id) {
			ctx->samp_id = create_input(ctx, 0);
			ctx->samp_id->regs[0]->flags |= IR3_REG_HALF;
			add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID,
					ctx->samp_id);
		}
		dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32);
		break;
	case nir_intrinsic_load_sample_mask_in:
		if (!ctx->samp_mask_in) {
			ctx->samp_mask_in = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN,
					ctx->samp_mask_in);
		}
		dst[0] = ctx->samp_mask_in;
		break;
	case nir_intrinsic_load_user_clip_plane:
		idx = nir_intrinsic_ucp_id(intr);
		for (int i = 0; i < intr->num_components; i++) {
			unsigned n = idx * 4 + i;
			dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
		}
		break;
	case nir_intrinsic_load_front_face:
		if (!ctx->frag_face) {
			ctx->so->frag_face = true;
			ctx->frag_face = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, ctx->frag_face);
			ctx->frag_face->regs[0]->flags |= IR3_REG_HALF;
		}
		/* for fragface, we get -1 for back and 0 for front. However this is
		 * the inverse of what nir expects (where ~0 is true).
		 */
		dst[0] = ir3_COV(b, ctx->frag_face, TYPE_S16, TYPE_S32);
		dst[0] = ir3_NOT_B(b, dst[0], 0);
		break;
	case nir_intrinsic_load_local_invocation_id:
		if (!ctx->local_invocation_id) {
			ctx->local_invocation_id = create_input_compmask(ctx, 0, 0x7);
			add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID,
					0x7, ctx->local_invocation_id);
		}
		ir3_split_dest(b, dst, ctx->local_invocation_id, 0, 3);
		break;
	case nir_intrinsic_load_work_group_id:
		if (!ctx->work_group_id) {
			ctx->work_group_id = create_input_compmask(ctx, 0, 0x7);
			add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID,
					0x7, ctx->work_group_id);
			ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH;
		}
		ir3_split_dest(b, dst, ctx->work_group_id, 0, 3);
		break;
	case nir_intrinsic_load_num_work_groups:
		for (int i = 0; i < intr->num_components; i++) {
			dst[i] = create_driver_param(ctx, IR3_DP_NUM_WORK_GROUPS_X + i);
		}
		break;
	case nir_intrinsic_load_local_group_size:
		for (int i = 0; i < intr->num_components; i++) {
			dst[i] = create_driver_param(ctx, IR3_DP_LOCAL_GROUP_SIZE_X + i);
		}
		break;
	case nir_intrinsic_discard_if:
	case nir_intrinsic_discard: {
		struct ir3_instruction *cond, *kill;

		if (intr->intrinsic == nir_intrinsic_discard_if) {
			/* conditional discard: */
			src = ir3_get_src(ctx, &intr->src[0]);
			cond = ir3_b2n(b, src[0]);
		} else {
			/* unconditional discard: */
			cond = create_immed(b, 1);
		}

		/* NOTE: only cmps.*.* can write p0.x: */
		cond = ir3_CMPS_S(b, cond, 0, create_immed(b, 0), 0);
		cond->cat2.condition = IR3_COND_NE;

		/* condition always goes in predicate register: */
		cond->regs[0]->num = regid(REG_P0, 0);

		kill = ir3_KILL(b, cond, 0);
		array_insert(ctx->ir, ctx->ir->predicates, kill);

		array_insert(b, b->keeps, kill);
		ctx->so->no_earlyz = true;

		break;
	}
	case nir_intrinsic_load_shared_ir3:
		emit_intrinsic_load_shared_ir3(ctx, intr, dst);
		break;
	case nir_intrinsic_store_shared_ir3:
		emit_intrinsic_store_shared_ir3(ctx, intr);
		break;
	default:
		ir3_context_error(ctx, "Unhandled intrinsic type: %s\n",
				nir_intrinsic_infos[intr->intrinsic].name);
		break;
	}

	if (info->has_dest)
		ir3_put_dst(ctx, &intr->dest);
}

static void
emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
{
	struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &instr->def,
			instr->def.num_components);

	if (instr->def.bit_size < 32) {
		for (int i = 0; i < instr->def.num_components; i++)
			dst[i] = create_immed_typed(ctx->block,
					instr->value[i].u16,
					TYPE_U16);
	} else {
		for (int i = 0; i < instr->def.num_components; i++)
			dst[i] = create_immed_typed(ctx->block,
					instr->value[i].u32,
					TYPE_U32);
	}
}

static void
emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
{
	struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &undef->def,
			undef->def.num_components);
	type_t type = (undef->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;

	/* backend doesn't want undefined instructions, so just plug
	 * in 0.0..
	 */
	for (int i = 0; i < undef->def.num_components; i++)
		dst[i] = create_immed_typed(ctx->block, fui(0.0), type);
}

/*
 * texture fetch/sample instructions:
 */

static void
tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
{
	unsigned coords, flags = 0;

	/* note: would use tex->coord_components.. except txs.. also,
	 * since array index goes after shadow ref, we don't want to
	 * count it:
	 */
	switch (tex->sampler_dim) {
	case GLSL_SAMPLER_DIM_1D:
	case GLSL_SAMPLER_DIM_BUF:
		coords = 1;
		break;
	case GLSL_SAMPLER_DIM_2D:
	case GLSL_SAMPLER_DIM_RECT:
	case GLSL_SAMPLER_DIM_EXTERNAL:
	case GLSL_SAMPLER_DIM_MS:
		coords = 2;
		break;
	case GLSL_SAMPLER_DIM_3D:
	case GLSL_SAMPLER_DIM_CUBE:
		coords = 3;
		flags |= IR3_INSTR_3D;
		break;
	default:
		unreachable("bad sampler_dim");
	}

	if (tex->is_shadow && tex->op != nir_texop_lod)
		flags |= IR3_INSTR_S;

	if (tex->is_array && tex->op != nir_texop_lod)
		flags |= IR3_INSTR_A;

	*flagsp = flags;
	*coordsp = coords;
}
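
/* e.g. a sampler2DArrayShadow sample gets coords = 2 (x, y) plus
 * IR3_INSTR_S and IR3_INSTR_A, since the shadow ref and array index
 * are counted separately from the coordinates.
 */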
1778
/* Gets the sampler/texture idx as a hvec2. This can either be dynamic
 * or immediate (in which case it will get lowered later to a non-.s2en
 * version of the tex instruction which encodes tex/samp as immediates):
 */
1783 static struct ir3_instruction *
1784 get_tex_samp_tex_src(struct ir3_context *ctx, nir_tex_instr *tex)
1785 {
1786 int texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset);
1787 int sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset);
1788 struct ir3_instruction *texture, *sampler;
1789
1790 if (texture_idx >= 0) {
1791 texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0];
1792 texture = ir3_COV(ctx->block, texture, TYPE_U32, TYPE_U16);
1793 } else {
1794 /* TODO what to do for dynamic case? I guess we only need the
1795 * max index for astc srgb workaround so maybe not a problem
1796 * to worry about if we don't enable indirect samplers for
1797 * a4xx?
1798 */
1799 ctx->max_texture_index = MAX2(ctx->max_texture_index, tex->texture_index);
1800 texture = create_immed_typed(ctx->block, tex->texture_index, TYPE_U16);
1801 }
1802
1803 if (sampler_idx >= 0) {
1804 sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0];
1805 sampler = ir3_COV(ctx->block, sampler, TYPE_U32, TYPE_U16);
1806 } else {
1807 sampler = create_immed_typed(ctx->block, tex->sampler_index, TYPE_U16);
1808 }
1809
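/* collect the pair as a hvec2, sampler idx in .x and texture idx in .y: */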
1810 return ir3_create_collect(ctx, (struct ir3_instruction*[]){
1811 sampler,
1812 texture,
1813 }, 2);
1814 }
1815
1816 static void
1817 emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
1818 {
1819 struct ir3_block *b = ctx->block;
1820 struct ir3_instruction **dst, *sam, *src0[12], *src1[4];
1821 struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
1822 struct ir3_instruction *lod, *compare, *proj, *sample_index;
1823 bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
1824 unsigned i, coords, flags, ncomp;
1825 unsigned nsrc0 = 0, nsrc1 = 0;
1826 type_t type;
1827 opc_t opc = 0;
1828
1829 ncomp = nir_dest_num_components(tex->dest);
1830
1831 coord = off = ddx = ddy = NULL;
1832 lod = proj = compare = sample_index = NULL;
1833
1834 dst = ir3_get_dst(ctx, &tex->dest, ncomp);
1835
1836 for (unsigned i = 0; i < tex->num_srcs; i++) {
1837 switch (tex->src[i].src_type) {
1838 case nir_tex_src_coord:
1839 coord = ir3_get_src(ctx, &tex->src[i].src);
1840 break;
1841 case nir_tex_src_bias:
1842 lod = ir3_get_src(ctx, &tex->src[i].src)[0];
1843 has_bias = true;
1844 break;
1845 case nir_tex_src_lod:
1846 lod = ir3_get_src(ctx, &tex->src[i].src)[0];
1847 has_lod = true;
1848 break;
1849 case nir_tex_src_comparator: /* shadow comparator */
1850 compare = ir3_get_src(ctx, &tex->src[i].src)[0];
1851 break;
1852 case nir_tex_src_projector:
1853 proj = ir3_get_src(ctx, &tex->src[i].src)[0];
1854 has_proj = true;
1855 break;
1856 case nir_tex_src_offset:
1857 off = ir3_get_src(ctx, &tex->src[i].src);
1858 has_off = true;
1859 break;
1860 case nir_tex_src_ddx:
1861 ddx = ir3_get_src(ctx, &tex->src[i].src);
1862 break;
1863 case nir_tex_src_ddy:
1864 ddy = ir3_get_src(ctx, &tex->src[i].src);
1865 break;
1866 case nir_tex_src_ms_index:
1867 sample_index = ir3_get_src(ctx, &tex->src[i].src)[0];
1868 break;
1869 case nir_tex_src_texture_offset:
1870 case nir_tex_src_sampler_offset:
/* handled in get_tex_samp_tex_src() */
1872 break;
1873 default:
1874 ir3_context_error(ctx, "Unhandled NIR tex src type: %d\n",
1875 tex->src[i].src_type);
1876 return;
1877 }
1878 }
1879
1880 switch (tex->op) {
1881 case nir_texop_tex_prefetch:
1882 compile_assert(ctx, !has_bias);
1883 compile_assert(ctx, !has_lod);
1884 compile_assert(ctx, !compare);
1885 compile_assert(ctx, !has_proj);
1886 compile_assert(ctx, !has_off);
1887 compile_assert(ctx, !ddx);
1888 compile_assert(ctx, !ddy);
1889 compile_assert(ctx, !sample_index);
1890 compile_assert(ctx, nir_tex_instr_src_index(tex, nir_tex_src_texture_offset) < 0);
1891 compile_assert(ctx, nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset) < 0);
1892
1893 if (ctx->so->num_sampler_prefetch < IR3_MAX_SAMPLER_PREFETCH) {
1894 opc = OPC_META_TEX_PREFETCH;
1895 ctx->so->num_sampler_prefetch++;
1896 break;
1897 }
1898 /* fallthru */
1899 case nir_texop_tex: opc = has_lod ? OPC_SAML : OPC_SAM; break;
1900 case nir_texop_txb: opc = OPC_SAMB; break;
1901 case nir_texop_txl: opc = OPC_SAML; break;
1902 case nir_texop_txd: opc = OPC_SAMGQ; break;
1903 case nir_texop_txf: opc = OPC_ISAML; break;
1904 case nir_texop_lod: opc = OPC_GETLOD; break;
1905 case nir_texop_tg4:
/* NOTE: a4xx might need to emulate gather w/ txf (this is
 * what the blob does; gather seems to be broken?), and a3xx did
 * not support it (but could probably also emulate it).
 */
1910 switch (tex->component) {
1911 case 0: opc = OPC_GATHER4R; break;
1912 case 1: opc = OPC_GATHER4G; break;
1913 case 2: opc = OPC_GATHER4B; break;
1914 case 3: opc = OPC_GATHER4A; break;
1915 }
1916 break;
1917 case nir_texop_txf_ms_fb:
1918 case nir_texop_txf_ms: opc = OPC_ISAMM; break;
1919 default:
1920 ir3_context_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
1921 return;
1922 }
1923
1924 tex_info(tex, &flags, &coords);
1925
1926 /*
1927 * lay out the first argument in the proper order:
1928 * - actual coordinates first
1929 * - shadow reference
1930 * - array index
1931 * - projection w
1932 * - starting at offset 4, dpdx.xy, dpdy.xy
1933 *
1934 * bias/lod go into the second arg
1935 */
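/* For example (sketch): a 2d-array shadow sample would be laid out as
 * src0 = { x, y, compare, array_idx }, per the ordering above.
 */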
1936
1937 /* insert tex coords: */
1938 for (i = 0; i < coords; i++)
1939 src0[i] = coord[i];
1940
1941 nsrc0 = i;
1942
1943 /* scale up integer coords for TXF based on the LOD */
1944 if (ctx->compiler->unminify_coords && (opc == OPC_ISAML)) {
1945 assert(has_lod);
1946 for (i = 0; i < coords; i++)
1947 src0[i] = ir3_SHL_B(b, src0[i], 0, lod, 0);
1948 }
1949
1950 if (coords == 1) {
1951 /* hw doesn't do 1d, so we treat it as 2d with
1952 * height of 1, and patch up the y coord.
1953 */
1954 if (is_isam(opc)) {
1955 src0[nsrc0++] = create_immed(b, 0);
1956 } else {
1957 src0[nsrc0++] = create_immed(b, fui(0.5));
1958 }
1959 }
1960
1961 if (tex->is_shadow && tex->op != nir_texop_lod)
1962 src0[nsrc0++] = compare;
1963
1964 if (tex->is_array && tex->op != nir_texop_lod) {
1965 struct ir3_instruction *idx = coord[coords];
1966
1967 /* the array coord for cube arrays needs 0.5 added to it */
1968 if (ctx->compiler->array_index_add_half && !is_isam(opc))
1969 idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);
1970
1971 src0[nsrc0++] = idx;
1972 }
1973
1974 if (has_proj) {
1975 src0[nsrc0++] = proj;
1976 flags |= IR3_INSTR_P;
1977 }
1978
1979 /* pad to 4, then ddx/ddy: */
1980 if (tex->op == nir_texop_txd) {
1981 while (nsrc0 < 4)
1982 src0[nsrc0++] = create_immed(b, fui(0.0));
1983 for (i = 0; i < coords; i++)
1984 src0[nsrc0++] = ddx[i];
1985 if (coords < 2)
1986 src0[nsrc0++] = create_immed(b, fui(0.0));
1987 for (i = 0; i < coords; i++)
1988 src0[nsrc0++] = ddy[i];
1989 if (coords < 2)
1990 src0[nsrc0++] = create_immed(b, fui(0.0));
1991 }
1992
1993 /* NOTE a3xx (and possibly a4xx?) might be different, using isaml
1994 * with scaled x coord according to requested sample:
1995 */
1996 if (opc == OPC_ISAMM) {
1997 if (ctx->compiler->txf_ms_with_isaml) {
1998 /* the samples are laid out in x dimension as
1999 * 0 1 2 3
2000 * x_ms = (x << ms) + sample_index;
2001 */
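/* e.g. (illustrative numbers) with 4x MSAA, ms=2, so texel x=5 at
 * sample_index=3 maps to x_ms = (5 << 2) + 3 = 23.
 */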
2002 struct ir3_instruction *ms;
2003 ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);
2004
2005 src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
2006 src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);
2007
2008 opc = OPC_ISAML;
2009 } else {
2010 src0[nsrc0++] = sample_index;
2011 }
2012 }
2013
2014 /*
2015 * second argument (if applicable):
2016 * - offsets
2017 * - lod
2018 * - bias
2019 */
2020 if (has_off | has_lod | has_bias) {
2021 if (has_off) {
2022 unsigned off_coords = coords;
2023 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
2024 off_coords--;
2025 for (i = 0; i < off_coords; i++)
2026 src1[nsrc1++] = off[i];
2027 if (off_coords < 2)
2028 src1[nsrc1++] = create_immed(b, fui(0.0));
2029 flags |= IR3_INSTR_O;
2030 }
2031
2032 if (has_lod | has_bias)
2033 src1[nsrc1++] = lod;
2034 }
2035
2036 switch (tex->dest_type) {
2037 case nir_type_invalid:
2038 case nir_type_float:
2039 type = TYPE_F32;
2040 break;
2041 case nir_type_int:
2042 type = TYPE_S32;
2043 break;
2044 case nir_type_uint:
2045 case nir_type_bool:
2046 type = TYPE_U32;
2047 break;
2048 default:
2049 unreachable("bad dest_type");
2050 }
2051
2052 if (opc == OPC_GETLOD)
2053 type = TYPE_S32;
2054
2055 struct ir3_instruction *samp_tex;
2056
2057 if (tex->op == nir_texop_txf_ms_fb) {
2058 /* only expect a single txf_ms_fb per shader: */
2059 compile_assert(ctx, !ctx->so->fb_read);
2060 compile_assert(ctx, ctx->so->type == MESA_SHADER_FRAGMENT);
2061
2062 ctx->so->fb_read = true;
2063 samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
2064 create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
2065 create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
2066 }, 2);
2067
2068 ctx->so->num_samp++;
2069 } else {
2070 samp_tex = get_tex_samp_tex_src(ctx, tex);
2071 }
2072
2073 struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0);
2074 struct ir3_instruction *col1 = ir3_create_collect(ctx, src1, nsrc1);
2075
2076 if (opc == OPC_META_TEX_PREFETCH) {
2077 int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
2078
2079 compile_assert(ctx, tex->src[idx].src.is_ssa);
2080
2081 sam = ir3_META_TEX_PREFETCH(b);
2082 ir3_reg_create(sam, 0, 0)->wrmask = MASK(ncomp); /* dst */
2083 sam->prefetch.input_offset =
2084 ir3_nir_coord_offset(tex->src[idx].src.ssa);
2085 sam->prefetch.tex = tex->texture_index;
2086 sam->prefetch.samp = tex->sampler_index;
2087 } else {
2088 sam = ir3_SAM(b, opc, type, MASK(ncomp), flags,
2089 samp_tex, col0, col1);
2090 }
2091
2092 if ((ctx->astc_srgb & (1 << tex->texture_index)) && !nir_tex_instr_is_query(tex)) {
2093 assert(opc != OPC_META_TEX_PREFETCH);
2094
2095 /* only need first 3 components: */
2096 sam->regs[0]->wrmask = 0x7;
2097 ir3_split_dest(b, dst, sam, 0, 3);
2098
2099 /* we need to sample the alpha separately with a non-ASTC
2100 * texture state:
2101 */
2102 sam = ir3_SAM(b, opc, type, 0b1000, flags,
2103 samp_tex, col0, col1);
2104
2105 array_insert(ctx->ir, ctx->ir->astc_srgb, sam);
2106
2107 /* fixup .w component: */
2108 ir3_split_dest(b, &dst[3], sam, 3, 1);
2109 } else {
2110 /* normal (non-workaround) case: */
2111 ir3_split_dest(b, dst, sam, 0, ncomp);
2112 }
2113
2114 /* GETLOD returns results in 4.8 fixed point */
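/* ie. convert to float and scale by 1/256; e.g. a raw value of 384
 * becomes 384 / 256 = 1.5 (illustrative).
 */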
2115 if (opc == OPC_GETLOD) {
2116 struct ir3_instruction *factor = create_immed(b, fui(1.0 / 256));
2117
2118 compile_assert(ctx, tex->dest_type == nir_type_float);
2119 for (i = 0; i < 2; i++) {
2120 dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_S32, TYPE_F32), 0,
2121 factor, 0);
2122 }
2123 }
2124
2125 ir3_put_dst(ctx, &tex->dest);
2126 }
2127
2128 static void
2129 emit_tex_info(struct ir3_context *ctx, nir_tex_instr *tex, unsigned idx)
2130 {
2131 struct ir3_block *b = ctx->block;
2132 struct ir3_instruction **dst, *sam;
2133
2134 dst = ir3_get_dst(ctx, &tex->dest, 1);
2135
2136 sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 1 << idx, 0,
2137 get_tex_samp_tex_src(ctx, tex), NULL, NULL);
2138
2139 /* even though there is only one component, since it ends
2140 * up in .y/.z/.w rather than .x, we need a split_dest()
2141 */
2142 if (idx)
2143 ir3_split_dest(b, dst, sam, 0, idx + 1);
2144
2145 /* The # of levels comes from getinfo.z. We need to add 1 to it, since
2146 * the value in TEX_CONST_0 is zero-based.
2147 */
2148 if (ctx->compiler->levels_add_one)
2149 dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);
2150
2151 ir3_put_dst(ctx, &tex->dest);
2152 }
2153
2154 static void
2155 emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
2156 {
2157 struct ir3_block *b = ctx->block;
2158 struct ir3_instruction **dst, *sam;
2159 struct ir3_instruction *lod;
2160 unsigned flags, coords;
2161
2162 tex_info(tex, &flags, &coords);
2163
2164 /* Actually we want the number of dimensions, not coordinates. This
2165 * distinction only matters for cubes.
2166 */
2167 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
2168 coords = 2;
2169
2170 dst = ir3_get_dst(ctx, &tex->dest, 4);
2171
2172 compile_assert(ctx, tex->num_srcs == 1);
2173 compile_assert(ctx, tex->src[0].src_type == nir_tex_src_lod);
2174
2175 lod = ir3_get_src(ctx, &tex->src[0].src)[0];
2176
2177 sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
2178 get_tex_samp_tex_src(ctx, tex), lod, NULL);
2179
2180 ir3_split_dest(b, dst, sam, 0, 4);
2181
2182 /* Array size actually ends up in .w rather than .z. This doesn't
2183 * matter for miplevel 0, but for higher mips the value in z is
2184 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
2185 * returned, which means that we have to add 1 to it for arrays.
2186 */
2187 if (tex->is_array) {
2188 if (ctx->compiler->levels_add_one) {
2189 dst[coords] = ir3_ADD_U(b, dst[3], 0, create_immed(b, 1), 0);
2190 } else {
2191 dst[coords] = ir3_MOV(b, dst[3], TYPE_U32);
2192 }
2193 }
2194
2195 ir3_put_dst(ctx, &tex->dest);
2196 }
2197
2198 static void
2199 emit_jump(struct ir3_context *ctx, nir_jump_instr *jump)
2200 {
2201 switch (jump->type) {
2202 case nir_jump_break:
2203 case nir_jump_continue:
2204 case nir_jump_return:
/* I *think* we can simply ignore this, and use the
 * successor block link to figure out where we need to
 * jump to for break/continue:
 */
2209 break;
2210 default:
2211 ir3_context_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
2212 break;
2213 }
2214 }
2215
2216 static void
2217 emit_instr(struct ir3_context *ctx, nir_instr *instr)
2218 {
2219 switch (instr->type) {
2220 case nir_instr_type_alu:
2221 emit_alu(ctx, nir_instr_as_alu(instr));
2222 break;
2223 case nir_instr_type_deref:
2224 /* ignored, handled as part of the intrinsic they are src to */
2225 break;
2226 case nir_instr_type_intrinsic:
2227 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2228 break;
2229 case nir_instr_type_load_const:
2230 emit_load_const(ctx, nir_instr_as_load_const(instr));
2231 break;
2232 case nir_instr_type_ssa_undef:
2233 emit_undef(ctx, nir_instr_as_ssa_undef(instr));
2234 break;
2235 case nir_instr_type_tex: {
2236 nir_tex_instr *tex = nir_instr_as_tex(instr);
/* a couple of tex instructions get special-cased:
 */
2239 switch (tex->op) {
2240 case nir_texop_txs:
2241 emit_tex_txs(ctx, tex);
2242 break;
2243 case nir_texop_query_levels:
2244 emit_tex_info(ctx, tex, 2);
2245 break;
2246 case nir_texop_texture_samples:
2247 emit_tex_info(ctx, tex, 3);
2248 break;
2249 default:
2250 emit_tex(ctx, tex);
2251 break;
2252 }
2253 break;
2254 }
2255 case nir_instr_type_jump:
2256 emit_jump(ctx, nir_instr_as_jump(instr));
2257 break;
2258 case nir_instr_type_phi:
2259 /* we have converted phi webs to regs in NIR by now */
2260 ir3_context_error(ctx, "Unexpected NIR instruction type: %d\n", instr->type);
2261 break;
2262 case nir_instr_type_call:
2263 case nir_instr_type_parallel_copy:
2264 ir3_context_error(ctx, "Unhandled NIR instruction type: %d\n", instr->type);
2265 break;
2266 }
2267 }
2268
2269 static struct ir3_block *
2270 get_block(struct ir3_context *ctx, const nir_block *nblock)
2271 {
2272 struct ir3_block *block;
2273 struct hash_entry *hentry;
2274
2275 hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
2276 if (hentry)
2277 return hentry->data;
2278
2279 block = ir3_block_create(ctx->ir);
2280 block->nblock = nblock;
2281 _mesa_hash_table_insert(ctx->block_ht, nblock, block);
2282
2283 block->predecessors = _mesa_pointer_set_create(block);
2284 set_foreach(nblock->predecessors, sentry) {
2285 _mesa_set_add(block->predecessors, get_block(ctx, sentry->key));
2286 }
2287
2288 return block;
2289 }
2290
2291 static void
2292 emit_block(struct ir3_context *ctx, nir_block *nblock)
2293 {
2294 struct ir3_block *block = get_block(ctx, nblock);
2295
2296 for (int i = 0; i < ARRAY_SIZE(block->successors); i++) {
2297 if (nblock->successors[i]) {
2298 block->successors[i] =
2299 get_block(ctx, nblock->successors[i]);
2300 }
2301 }
2302
2303 ctx->block = block;
2304 list_addtail(&block->node, &ctx->ir->block_list);
2305
2306 /* re-emit addr register in each block if needed: */
2307 for (int i = 0; i < ARRAY_SIZE(ctx->addr_ht); i++) {
2308 _mesa_hash_table_destroy(ctx->addr_ht[i], NULL);
2309 ctx->addr_ht[i] = NULL;
2310 }
2311
2312 nir_foreach_instr(instr, nblock) {
2313 ctx->cur_instr = instr;
2314 emit_instr(ctx, instr);
2315 ctx->cur_instr = NULL;
2316 if (ctx->error)
2317 return;
2318 }
2319 }
2320
2321 static void emit_cf_list(struct ir3_context *ctx, struct exec_list *list);
2322
2323 static void
2324 emit_if(struct ir3_context *ctx, nir_if *nif)
2325 {
2326 struct ir3_instruction *condition = ir3_get_src(ctx, &nif->condition)[0];
2327
2328 ctx->block->condition =
2329 ir3_get_predicate(ctx, ir3_b2n(condition->block, condition));
2330
2331 emit_cf_list(ctx, &nif->then_list);
2332 emit_cf_list(ctx, &nif->else_list);
2333 }
2334
2335 static void
2336 emit_loop(struct ir3_context *ctx, nir_loop *nloop)
2337 {
2338 emit_cf_list(ctx, &nloop->body);
2339 ctx->so->loops++;
2340 }
2341
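/* stack_push()/stack_pop() track the nesting depth of if/loop control
 * flow; the max depth observed ends up reported as so->branchstack at
 * the end of compilation.
 */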
2342 static void
2343 stack_push(struct ir3_context *ctx)
2344 {
2345 ctx->stack++;
2346 ctx->max_stack = MAX2(ctx->max_stack, ctx->stack);
2347 }
2348
2349 static void
2350 stack_pop(struct ir3_context *ctx)
2351 {
2352 compile_assert(ctx, ctx->stack > 0);
2353 ctx->stack--;
2354 }
2355
2356 static void
2357 emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
2358 {
2359 foreach_list_typed(nir_cf_node, node, node, list) {
2360 switch (node->type) {
2361 case nir_cf_node_block:
2362 emit_block(ctx, nir_cf_node_as_block(node));
2363 break;
2364 case nir_cf_node_if:
2365 stack_push(ctx);
2366 emit_if(ctx, nir_cf_node_as_if(node));
2367 stack_pop(ctx);
2368 break;
2369 case nir_cf_node_loop:
2370 stack_push(ctx);
2371 emit_loop(ctx, nir_cf_node_as_loop(node));
2372 stack_pop(ctx);
2373 break;
2374 case nir_cf_node_function:
2375 ir3_context_error(ctx, "TODO\n");
2376 break;
2377 }
2378 }
2379 }
2380
2381 /* emit stream-out code. At this point, the current block is the original
2382 * (nir) end block, and nir ensures that all flow control paths terminate
2383 * into the end block. We re-purpose the original end block to generate
2384 * the 'if (vtxcnt < maxvtxcnt)' condition, then append the conditional
2385 * block holding stream-out write instructions, followed by the new end
2386 * block:
2387 *
2388 * blockOrigEnd {
2389 * p0.x = (vtxcnt < maxvtxcnt)
2390 * // succs: blockStreamOut, blockNewEnd
2391 * }
2392 * blockStreamOut {
2393 * ... stream-out instructions ...
2394 * // succs: blockNewEnd
2395 * }
2396 * blockNewEnd {
2397 * }
2398 */
2399 static void
2400 emit_stream_out(struct ir3_context *ctx)
2401 {
2402 struct ir3 *ir = ctx->ir;
2403 struct ir3_stream_output_info *strmout =
2404 &ctx->so->shader->stream_output;
2405 struct ir3_block *orig_end_block, *stream_out_block, *new_end_block;
2406 struct ir3_instruction *vtxcnt, *maxvtxcnt, *cond;
2407 struct ir3_instruction *bases[IR3_MAX_SO_BUFFERS];
2408
2409 /* create vtxcnt input in input block at top of shader,
2410 * so that it is seen as live over the entire duration
2411 * of the shader:
2412 */
2413 vtxcnt = create_input(ctx, 0);
2414 add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt);
2415
2416 maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX);
2417
/* at this point, we are at the original 'end' block;
 * re-purpose this block for the stream-out condition, then
 * append the stream-out block and new-end block:
 */
2422 orig_end_block = ctx->block;
2423
2424 // TODO these blocks need to update predecessors..
2425 // maybe w/ store_global intrinsic, we could do this
2426 // stuff in nir->nir pass
2427
2428 stream_out_block = ir3_block_create(ir);
2429 list_addtail(&stream_out_block->node, &ir->block_list);
2430
2431 new_end_block = ir3_block_create(ir);
2432 list_addtail(&new_end_block->node, &ir->block_list);
2433
2434 orig_end_block->successors[0] = stream_out_block;
2435 orig_end_block->successors[1] = new_end_block;
2436 stream_out_block->successors[0] = new_end_block;
2437
2438 /* setup 'if (vtxcnt < maxvtxcnt)' condition: */
2439 cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0);
2440 cond->regs[0]->num = regid(REG_P0, 0);
2441 cond->cat2.condition = IR3_COND_LT;
2442
/* the condition goes on the block previous to the conditional,
 * since it is used to pick which of the two successor
 * paths to take:
 */
2447 orig_end_block->condition = cond;
2448
2449 /* switch to stream_out_block to generate the stream-out
2450 * instructions:
2451 */
2452 ctx->block = stream_out_block;
2453
2454 /* Calculate base addresses based on vtxcnt. Instructions
2455 * generated for bases not used in following loop will be
2456 * stripped out in the backend.
2457 */
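/* ie. (sketch of the address math below)
 *   bases[i] = tfbo_base_uniform[i] + vtxcnt * stride[i] * 4, in bytes
 */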
2458 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
2459 struct ir3_const_state *const_state = &ctx->so->shader->const_state;
2460 unsigned stride = strmout->stride[i];
2461 struct ir3_instruction *base, *off;
2462
2463 base = create_uniform(ctx->block, regid(const_state->offsets.tfbo, i));
2464
2465 /* 24-bit should be enough: */
2466 off = ir3_MUL_U24(ctx->block, vtxcnt, 0,
2467 create_immed(ctx->block, stride * 4), 0);
2468
2469 bases[i] = ir3_ADD_S(ctx->block, off, 0, base, 0);
2470 }
2471
2472 /* Generate the per-output store instructions: */
2473 for (unsigned i = 0; i < strmout->num_outputs; i++) {
2474 for (unsigned j = 0; j < strmout->output[i].num_components; j++) {
2475 unsigned c = j + strmout->output[i].start_component;
2476 struct ir3_instruction *base, *out, *stg;
2477
2478 base = bases[strmout->output[i].output_buffer];
2479 out = ctx->ir->outputs[regid(strmout->output[i].register_index, c)];
2480
2481 stg = ir3_STG(ctx->block, base, 0, out, 0,
2482 create_immed(ctx->block, 1), 0);
2483 stg->cat6.type = TYPE_U32;
2484 stg->cat6.dst_offset = (strmout->output[i].dst_offset + j) * 4;
2485
2486 array_insert(ctx->block, ctx->block->keeps, stg);
2487 }
2488 }
2489
2490 /* and finally switch to the new_end_block: */
2491 ctx->block = new_end_block;
2492 }
2493
2494 static void
2495 emit_function(struct ir3_context *ctx, nir_function_impl *impl)
2496 {
2497 nir_metadata_require(impl, nir_metadata_block_index);
2498
2499 compile_assert(ctx, ctx->stack == 0);
2500
2501 emit_cf_list(ctx, &impl->body);
2502 emit_block(ctx, impl->end_block);
2503
2504 compile_assert(ctx, ctx->stack == 0);
2505
2506 /* at this point, we should have a single empty block,
2507 * into which we emit the 'end' instruction.
2508 */
2509 compile_assert(ctx, list_empty(&ctx->block->instr_list));
2510
/* If stream-out (aka transform-feedback) is enabled, emit the
2512 * stream-out instructions, followed by a new empty block (into
2513 * which the 'end' instruction lands).
2514 *
2515 * NOTE: it is done in this order, rather than inserting before
2516 * we emit end_block, because NIR guarantees that all blocks
2517 * flow into end_block, and that end_block has no successors.
2518 * So by re-purposing end_block as the first block of stream-
2519 * out, we guarantee that all exit paths flow into the stream-
2520 * out instructions.
2521 */
2522 if ((ctx->compiler->gpu_id < 500) &&
2523 (ctx->so->shader->stream_output.num_outputs > 0) &&
2524 !ctx->so->binning_pass) {
2525 debug_assert(ctx->so->type == MESA_SHADER_VERTEX);
2526 emit_stream_out(ctx);
2527 }
2528
/* Vertex shaders in a tessellation or geometry pipeline treat END as a
 * NOP and have an epilogue that writes the VS outputs to local storage,
 * to be read by the HS. Then they reset the execution mask (chmask)
 * and chain to the next shader (chsh).
 */
2534 if (ctx->so->type == MESA_SHADER_VERTEX && ctx->so->key.has_gs) {
2535 struct ir3_instruction *chmask =
2536 ir3_CHMASK(ctx->block);
2537 chmask->barrier_class = IR3_BARRIER_EVERYTHING;
2538 chmask->barrier_conflict = IR3_BARRIER_EVERYTHING;
2539
2540 struct ir3_instruction *chsh =
2541 ir3_CHSH(ctx->block);
2542 chsh->barrier_class = IR3_BARRIER_EVERYTHING;
2543 chsh->barrier_conflict = IR3_BARRIER_EVERYTHING;
2544 } else {
2545 ir3_END(ctx->block);
2546 }
2547 }
2548
2549 static void
2550 setup_input(struct ir3_context *ctx, nir_variable *in)
2551 {
2552 struct ir3_shader_variant *so = ctx->so;
2553 unsigned ncomp = glsl_get_components(in->type);
2554 unsigned n = in->data.driver_location;
2555 unsigned frac = in->data.location_frac;
2556 unsigned slot = in->data.location;
2557
2558 /* Inputs are loaded using ldlw or ldg for these stages. */
2559 if (ctx->so->type == MESA_SHADER_TESS_CTRL ||
2560 ctx->so->type == MESA_SHADER_TESS_EVAL ||
2561 ctx->so->type == MESA_SHADER_GEOMETRY)
2562 return;
2563
/* skip unread inputs: we could end up with (for example) unsplit
 * matrix/etc inputs in the case they are not read, so just silently
 * skip these.
 */
2568 if (ncomp > 4)
2569 return;
2570
2571 so->inputs[n].slot = slot;
2572 so->inputs[n].compmask = (1 << (ncomp + frac)) - 1;
2573 so->inputs_count = MAX2(so->inputs_count, n + 1);
2574 so->inputs[n].interpolate = in->data.interpolation;
2575
2576 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
2577
/* if any varyings have the 'sample' qualifier, that triggers us
 * to run in per-sample mode:
 */
2581 so->per_samp |= in->data.sample;
2582
2583 for (int i = 0; i < ncomp; i++) {
2584 struct ir3_instruction *instr = NULL;
2585 unsigned idx = (n * 4) + i + frac;
2586
2587 if (slot == VARYING_SLOT_POS) {
2588 ir3_context_error(ctx, "fragcoord should be a sysval!\n");
2589 } else if (slot == VARYING_SLOT_PNTC) {
2590 /* see for example st_nir_fixup_varying_slots().. this is
2591 * maybe a bit mesa/st specific. But we need things to line
2592 * up for this in fdN_program:
2593 * unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0);
2594 * if (emit->sprite_coord_enable & texmask) {
2595 * ...
2596 * }
2597 */
2598 so->inputs[n].slot = VARYING_SLOT_VAR8;
2599 so->inputs[n].bary = true;
2600 instr = create_frag_input(ctx, false, idx);
2601 } else {
2602 /* detect the special case for front/back colors where
2603 * we need to do flat vs smooth shading depending on
2604 * rast state:
2605 */
2606 if (in->data.interpolation == INTERP_MODE_NONE) {
2607 switch (slot) {
2608 case VARYING_SLOT_COL0:
2609 case VARYING_SLOT_COL1:
2610 case VARYING_SLOT_BFC0:
2611 case VARYING_SLOT_BFC1:
2612 so->inputs[n].rasterflat = true;
2613 break;
2614 default:
2615 break;
2616 }
2617 }
2618
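/* flat_bypass (assumption: set on chips where flat-shaded varyings can
 * be fetched directly with ldlv rather than interpolated via bary.f)
 * forces use_ldlv for flat inputs:
 */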
2619 if (ctx->compiler->flat_bypass) {
2620 if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) ||
2621 (so->inputs[n].rasterflat && ctx->so->key.rasterflat))
2622 so->inputs[n].use_ldlv = true;
2623 }
2624
2625 so->inputs[n].bary = true;
2626
2627 instr = create_frag_input(ctx, so->inputs[n].use_ldlv, idx);
2628 }
2629
2630 compile_assert(ctx, idx < ctx->ir->ninputs);
2631
2632 ctx->ir->inputs[idx] = instr;
2633 }
2634 } else if (ctx->so->type == MESA_SHADER_VERTEX) {
2635 for (int i = 0; i < ncomp; i++) {
2636 unsigned idx = (n * 4) + i + frac;
2637 compile_assert(ctx, idx < ctx->ir->ninputs);
2638 ctx->ir->inputs[idx] = create_input(ctx, idx);
2639 }
2640 } else {
2641 ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
2642 }
2643
2644 if (so->inputs[n].bary || (ctx->so->type == MESA_SHADER_VERTEX)) {
2645 so->total_in += ncomp;
2646 }
2647 }
2648
2649 /* Initially we assign non-packed inloc's for varyings, as we don't really
2650 * know up-front which components will be unused. After all the compilation
2651 * stages we scan the shader to see which components are actually used, and
2652 * re-pack the inlocs to eliminate unneeded varyings.
2653 */
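/* For example (hypothetical masks): if input 0 only uses .xy and
 * input 1 only uses .x, then input 0 gets inloc 0 with compmask 0x3
 * and input 1 gets inloc 2 with compmask 0x1.
 */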
2654 static void
2655 pack_inlocs(struct ir3_context *ctx)
2656 {
2657 struct ir3_shader_variant *so = ctx->so;
2658 uint8_t used_components[so->inputs_count];
2659
2660 memset(used_components, 0, sizeof(used_components));
2661
2662 /*
2663 * First Step: scan shader to find which bary.f/ldlv remain:
2664 */
2665
2666 list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
2667 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
2668 if (is_input(instr)) {
2669 unsigned inloc = instr->regs[1]->iim_val;
2670 unsigned i = inloc / 4;
2671 unsigned j = inloc % 4;
2672
2673 compile_assert(ctx, instr->regs[1]->flags & IR3_REG_IMMED);
2674 compile_assert(ctx, i < so->inputs_count);
2675
2676 used_components[i] |= 1 << j;
2677 } else if (instr->opc == OPC_META_TEX_PREFETCH) {
2678 for (int n = 0; n < 2; n++) {
2679 unsigned inloc = instr->prefetch.input_offset + n;
2680 unsigned i = inloc / 4;
2681 unsigned j = inloc % 4;
2682
2683 compile_assert(ctx, i < so->inputs_count);
2684
2685 used_components[i] |= 1 << j;
2686 }
2687 }
2688 }
2689 }
2690
2691 /*
2692 * Second Step: reassign varying inloc/slots:
2693 */
2694
2695 unsigned actual_in = 0;
2696 unsigned inloc = 0;
2697
2698 for (unsigned i = 0; i < so->inputs_count; i++) {
2699 unsigned compmask = 0, maxcomp = 0;
2700
2701 so->inputs[i].inloc = inloc;
2702 so->inputs[i].bary = false;
2703
2704 for (unsigned j = 0; j < 4; j++) {
2705 if (!(used_components[i] & (1 << j)))
2706 continue;
2707
2708 compmask |= (1 << j);
2709 actual_in++;
2710 maxcomp = j + 1;
2711
/* at this point, since the used_components[i] mask only
 * considers varyings (ie. not sysvals), we know this
 * is a varying:
 */
2716 so->inputs[i].bary = true;
2717 }
2718
2719 if (so->inputs[i].bary) {
2720 so->varying_in++;
2721 so->inputs[i].compmask = (1 << maxcomp) - 1;
2722 inloc += maxcomp;
2723 }
2724 }
2725
2726 /*
2727 * Third Step: reassign packed inloc's:
2728 */
2729
2730 list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
2731 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
2732 if (is_input(instr)) {
2733 unsigned inloc = instr->regs[1]->iim_val;
2734 unsigned i = inloc / 4;
2735 unsigned j = inloc % 4;
2736
2737 instr->regs[1]->iim_val = so->inputs[i].inloc + j;
2738 }
2739 }
2740 }
2741 }
2742
2743 static void
2744 setup_output(struct ir3_context *ctx, nir_variable *out)
2745 {
2746 struct ir3_shader_variant *so = ctx->so;
2747 unsigned ncomp = glsl_get_components(out->type);
2748 unsigned n = out->data.driver_location;
2749 unsigned frac = out->data.location_frac;
2750 unsigned slot = out->data.location;
2751 unsigned comp = 0;
2752
2753 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
2754 switch (slot) {
2755 case FRAG_RESULT_DEPTH:
2756 comp = 2; /* tgsi will write to .z component */
2757 so->writes_pos = true;
2758 break;
2759 case FRAG_RESULT_COLOR:
2760 so->color0_mrt = 1;
2761 break;
2762 case FRAG_RESULT_SAMPLE_MASK:
2763 so->writes_smask = true;
2764 break;
2765 default:
2766 if (slot >= FRAG_RESULT_DATA0)
2767 break;
2768 ir3_context_error(ctx, "unknown FS output name: %s\n",
2769 gl_frag_result_name(slot));
2770 }
2771 } else if (ctx->so->type == MESA_SHADER_VERTEX ||
2772 ctx->so->type == MESA_SHADER_GEOMETRY) {
2773 switch (slot) {
2774 case VARYING_SLOT_POS:
2775 so->writes_pos = true;
2776 break;
2777 case VARYING_SLOT_PSIZ:
2778 so->writes_psize = true;
2779 break;
2780 case VARYING_SLOT_PRIMITIVE_ID:
2781 case VARYING_SLOT_LAYER:
2782 case VARYING_SLOT_GS_VERTEX_FLAGS_IR3:
2783 debug_assert(ctx->so->type == MESA_SHADER_GEOMETRY);
2784 /* fall through */
2785 case VARYING_SLOT_COL0:
2786 case VARYING_SLOT_COL1:
2787 case VARYING_SLOT_BFC0:
2788 case VARYING_SLOT_BFC1:
2789 case VARYING_SLOT_FOGC:
2790 case VARYING_SLOT_CLIP_DIST0:
2791 case VARYING_SLOT_CLIP_DIST1:
2792 case VARYING_SLOT_CLIP_VERTEX:
2793 break;
2794 default:
2795 if (slot >= VARYING_SLOT_VAR0)
2796 break;
2797 if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
2798 break;
2799 ir3_context_error(ctx, "unknown %s shader output name: %s\n",
2800 _mesa_shader_stage_to_string(ctx->so->type),
2801 gl_varying_slot_name(slot));
2802 }
2803 } else {
2804 ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
2805 }
2806
2807 compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
2808
2809 so->outputs[n].slot = slot;
2810 so->outputs[n].regid = regid(n, comp);
2811 so->outputs_count = MAX2(so->outputs_count, n + 1);
2812
2813 for (int i = 0; i < ncomp; i++) {
2814 unsigned idx = (n * 4) + i + frac;
2815 compile_assert(ctx, idx < ctx->ir->noutputs);
2816 ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
2817 }
2818
2819 /* if varying packing doesn't happen, we could end up in a situation
2820 * with "holes" in the output, and since the per-generation code that
2821 * sets up varying linkage registers doesn't expect to have more than
2822 * one varying per vec4 slot, pad the holes.
2823 *
2824 * Note that this should probably generate a performance warning of
2825 * some sort.
2826 */
2827 for (int i = 0; i < frac; i++) {
2828 unsigned idx = (n * 4) + i;
2829 if (!ctx->ir->outputs[idx]) {
2830 ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
2831 }
2832 }
2833 }
2834
2835 static int
2836 max_drvloc(struct exec_list *vars)
2837 {
2838 int drvloc = -1;
2839 nir_foreach_variable(var, vars) {
2840 drvloc = MAX2(drvloc, (int)var->data.driver_location);
2841 }
2842 return drvloc;
2843 }
2844
2845 static const unsigned max_sysvals[] = {
2846 [MESA_SHADER_VERTEX] = 16,
2847 [MESA_SHADER_GEOMETRY] = 16,
2848 [MESA_SHADER_FRAGMENT] = 24, // TODO
2849 [MESA_SHADER_COMPUTE] = 16, // TODO how many do we actually need?
2850 [MESA_SHADER_KERNEL] = 16, // TODO how many do we actually need?
2851 };
2852
2853 static void
2854 emit_instructions(struct ir3_context *ctx)
2855 {
2856 unsigned ninputs, noutputs;
2857 nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);
2858
2859 ninputs = (max_drvloc(&ctx->s->inputs) + 1) * 4;
2860 noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4;
2861
2862 /* we need to leave room for sysvals:
2863 */
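/* e.g. (illustrative) a vertex shader whose highest input
 * driver_location is 3 gets ninputs = (3 + 1) * 4 + 16 = 32 slots:
 */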
2864 ninputs += max_sysvals[ctx->so->type];
2865 if (ctx->so->type == MESA_SHADER_VERTEX)
2866 noutputs += 8; /* gs or tess header + primitive_id */
2867
2868 ctx->ir = ir3_create(ctx->compiler, ctx->so->type, ninputs, noutputs);
2869
2870 /* Create inputs in first block: */
2871 ctx->block = get_block(ctx, nir_start_block(fxn));
2872 ctx->in_block = ctx->block;
2873 list_addtail(&ctx->block->node, &ctx->ir->block_list);
2874
2875 ninputs -= max_sysvals[ctx->so->type];
2876
2877 if (ctx->so->key.has_gs) {
2878 if (ctx->so->type == MESA_SHADER_VERTEX ||
2879 ctx->so->type == MESA_SHADER_GEOMETRY) {
2880 ctx->gs_header = create_input(ctx, 0);
2881 ctx->primitive_id = create_input(ctx, 0);
2882 }
2883 }
2884
2885 /* for fragment shader, the vcoord input register is used as the
2886 * base for bary.f varying fetch instrs:
2887 *
2888 * TODO defer creating ctx->ij_pixel and corresponding sysvals
2889 * until emit_intrinsic when we know they are actually needed.
2890 * For now, we defer creating ctx->ij_centroid, etc, since we
2891 * only need ij_pixel for "old style" varying inputs (ie.
2892 * tgsi_to_nir)
2893 */
2894 struct ir3_instruction *vcoord = NULL;
2895 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
2896 struct ir3_instruction *xy[2];
2897
2898 vcoord = create_input_compmask(ctx, 0, 0x3);
2899 ir3_split_dest(ctx->block, xy, vcoord, 0, 2);
2900
2901 ctx->ij_pixel = ir3_create_collect(ctx, xy, 2);
2902 }
2903
2904 /* Setup inputs: */
2905 nir_foreach_variable(var, &ctx->s->inputs) {
2906 setup_input(ctx, var);
2907 }
2908
2909 /* Defer add_sysval_input() stuff until after setup_inputs(),
2910 * because sysvals need to be appended after varyings:
2911 */
2912 if (vcoord) {
2913 add_sysval_input_compmask(ctx, SYSTEM_VALUE_BARYCENTRIC_PIXEL,
2914 0x3, vcoord);
2915 }
2916
2917 if (ctx->primitive_id)
2918 add_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, ctx->primitive_id);
2919 if (ctx->gs_header)
2920 add_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, ctx->gs_header);
2921
2922 /* Setup outputs: */
2923 nir_foreach_variable(var, &ctx->s->outputs) {
2924 setup_output(ctx, var);
2925 }
2926
/* Set up the gs header as an output for the vertex shader so the tess
 * ctrl shader won't clobber it. */
2929 if (ctx->so->type == MESA_SHADER_VERTEX) {
2930 struct ir3_shader_variant *so = ctx->so;
2931 if (ctx->primitive_id) {
2932 unsigned n = so->outputs_count++;
2933 so->outputs[n].slot = VARYING_SLOT_PRIMITIVE_ID;
2934 so->outputs[n].regid = regid(n, 0);
2935 ctx->ir->outputs[n * 4] = ctx->primitive_id;
2936
2937 compile_assert(ctx, n * 4 < ctx->ir->noutputs);
2938 }
2939
2940 if (ctx->gs_header) {
2941 unsigned n = so->outputs_count++;
2942 so->outputs[n].slot = VARYING_SLOT_GS_HEADER_IR3;
2943 so->outputs[n].regid = regid(n, 0);
2944 ctx->ir->outputs[n * 4] = ctx->gs_header;
2945
2946 compile_assert(ctx, n * 4 < ctx->ir->noutputs);
2947 }
2948
2949 }
2950
2951 /* Find # of samplers: */
2952 nir_foreach_variable(var, &ctx->s->uniforms) {
2953 ctx->so->num_samp += glsl_type_get_sampler_count(var->type);
/* just assume that we'll be reading from images.. if it
 * is write-only we wouldn't have to count it, but it isn't
 * clear there is a good way to know?
 */
2958 ctx->so->num_samp += glsl_type_get_image_count(var->type);
2959 }
2960
2961 /* NOTE: need to do something more clever when we support >1 fxn */
2962 nir_foreach_register(reg, &fxn->registers) {
2963 ir3_declare_array(ctx, reg);
2964 }
2965 /* And emit the body: */
2966 ctx->impl = fxn;
2967 emit_function(ctx, fxn);
2968 }
2969
/* from the NIR perspective, we actually have varying inputs. But the varying
2971 * inputs, from an IR standpoint, are just bary.f/ldlv instructions. The
2972 * only actual inputs are the sysvals.
2973 */
2974 static void
2975 fixup_frag_inputs(struct ir3_context *ctx)
2976 {
2977 struct ir3_shader_variant *so = ctx->so;
2978 struct ir3 *ir = ctx->ir;
2979 unsigned i = 0;
2980
2981 /* sysvals should appear at the end of the inputs, drop everything else: */
2982 while ((i < so->inputs_count) && !so->inputs[i].sysval)
2983 i++;
2984
2985 /* at IR level, inputs are always blocks of 4 scalars: */
2986 i *= 4;
2987
2988 ir->inputs = &ir->inputs[i];
2989 ir->ninputs -= i;
2990 }
2991
2992 /* Fixup tex sampler state for astc/srgb workaround instructions. We
2993 * need to assign the tex state indexes for these after we know the
2994 * max tex index.
2995 */
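/* For example (hypothetical indices): if max_texture_index is 2 and
 * tex 0 and tex 2 both need the workaround, their alpha samplers get
 * tex state slots 3 and 4 respectively.
 */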
2996 static void
2997 fixup_astc_srgb(struct ir3_context *ctx)
2998 {
2999 struct ir3_shader_variant *so = ctx->so;
3000 /* indexed by original tex idx, value is newly assigned alpha sampler
3001 * state tex idx. Zero is invalid since there is at least one sampler
3002 * if we get here.
3003 */
3004 unsigned alt_tex_state[16] = {0};
3005 unsigned tex_idx = ctx->max_texture_index + 1;
3006 unsigned idx = 0;
3007
3008 so->astc_srgb.base = tex_idx;
3009
3010 for (unsigned i = 0; i < ctx->ir->astc_srgb_count; i++) {
3011 struct ir3_instruction *sam = ctx->ir->astc_srgb[i];
3012
3013 compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));
3014
3015 if (alt_tex_state[sam->cat5.tex] == 0) {
3016 /* assign new alternate/alpha tex state slot: */
3017 alt_tex_state[sam->cat5.tex] = tex_idx++;
3018 so->astc_srgb.orig_idx[idx++] = sam->cat5.tex;
3019 so->astc_srgb.count++;
3020 }
3021
3022 sam->cat5.tex = alt_tex_state[sam->cat5.tex];
3023 }
3024 }
3025
3026 static void
3027 fixup_binning_pass(struct ir3_context *ctx)
3028 {
3029 struct ir3_shader_variant *so = ctx->so;
3030 struct ir3 *ir = ctx->ir;
3031 unsigned i, j;
3032
3033 for (i = 0, j = 0; i < so->outputs_count; i++) {
3034 unsigned slot = so->outputs[i].slot;
3035
3036 /* throw away everything but first position/psize */
3037 if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
3038 if (i != j) {
3039 so->outputs[j] = so->outputs[i];
3040 ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0];
3041 ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1];
3042 ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2];
3043 ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3];
3044 }
3045 j++;
3046 }
3047 }
3048 so->outputs_count = j;
3049 ir->noutputs = j * 4;
3050 }
3051
3052 static void
3053 collect_tex_prefetches(struct ir3_context *ctx, struct ir3 *ir)
3054 {
3055 unsigned idx = 0;
3056
3057 /* Collect sampling instructions eligible for pre-dispatch. */
3058 list_for_each_entry(struct ir3_block, block, &ir->block_list, node) {
3059 list_for_each_entry_safe(struct ir3_instruction, instr,
3060 &block->instr_list, node) {
3061 if (instr->opc == OPC_META_TEX_PREFETCH) {
3062 assert(idx < ARRAY_SIZE(ctx->so->sampler_prefetch));
3063 struct ir3_sampler_prefetch *fetch =
3064 &ctx->so->sampler_prefetch[idx];
3065 idx++;
3066
3067 fetch->cmd = IR3_SAMPLER_PREFETCH_CMD;
3068 fetch->wrmask = instr->regs[0]->wrmask;
3069 fetch->tex_id = instr->prefetch.tex;
3070 fetch->samp_id = instr->prefetch.samp;
3071 fetch->dst = instr->regs[0]->num;
3072 fetch->src = instr->prefetch.input_offset;
3073
3074 ctx->so->total_in =
3075 MAX2(ctx->so->total_in, instr->prefetch.input_offset + 2);
3076
3077 /* Disable half precision until supported. */
3078 fetch->half_precision = 0x0;
3079
3080 /* Remove the prefetch placeholder instruction: */
3081 list_delinit(&instr->node);
3082 }
3083 }
3084 }
3085 }
3086
3087 int
3088 ir3_compile_shader_nir(struct ir3_compiler *compiler,
3089 struct ir3_shader_variant *so)
3090 {
3091 struct ir3_context *ctx;
3092 struct ir3 *ir;
3093 struct ir3_instruction **inputs;
3094 unsigned i;
3095 int ret = 0, max_bary;
3096
3097 assert(!so->ir);
3098
3099 ctx = ir3_context_init(compiler, so);
3100 if (!ctx) {
3101 DBG("INIT failed!");
3102 ret = -1;
3103 goto out;
3104 }
3105
3106 emit_instructions(ctx);
3107
3108 if (ctx->error) {
3109 DBG("EMIT failed!");
3110 ret = -1;
3111 goto out;
3112 }
3113
3114 ir = so->ir = ctx->ir;
3115
/* keep track of the inputs from the TGSI perspective.. */
3117 inputs = ir->inputs;
3118
3119 /* but fixup actual inputs for frag shader: */
3120 if (so->type == MESA_SHADER_FRAGMENT)
3121 fixup_frag_inputs(ctx);
3122
3123 /* at this point, for binning pass, throw away unneeded outputs: */
3124 if (so->binning_pass && (ctx->compiler->gpu_id < 600))
3125 fixup_binning_pass(ctx);
3126
3127 /* if we want half-precision outputs, mark the output registers
3128 * as half:
3129 */
3130 if (so->key.half_precision) {
3131 for (i = 0; i < ir->noutputs; i++) {
3132 struct ir3_instruction *out = ir->outputs[i];
3133
3134 if (!out)
3135 continue;
3136
3137 /* if frag shader writes z, that needs to be full precision: */
3138 if (so->outputs[i/4].slot == FRAG_RESULT_DEPTH)
3139 continue;
3140
3141 out->regs[0]->flags |= IR3_REG_HALF;
3142 /* output could be a fanout (ie. texture fetch output)
3143 * in which case we need to propagate the half-reg flag
3144 * up to the definer so that RA sees it:
3145 */
3146 if (out->opc == OPC_META_FO) {
3147 out = out->regs[1]->instr;
3148 out->regs[0]->flags |= IR3_REG_HALF;
3149 }
3150
3151 if (out->opc == OPC_MOV) {
3152 out->cat1.dst_type = half_type(out->cat1.dst_type);
3153 }
3154 }
3155 }
3156
3157 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3158 printf("BEFORE CP:\n");
3159 ir3_print(ir);
3160 }
3161
3162 ir3_cp(ir, so);
3163
3164 /* at this point, for binning pass, throw away unneeded outputs:
3165 * Note that for a6xx and later, we do this after ir3_cp to ensure
3166 * that the uniform/constant layout for BS and VS matches, so that
* we can re-use the same VS_CONST state group.
3168 */
3169 if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
3170 fixup_binning_pass(ctx);
3171
3172 /* for a6xx+, binning and draw pass VS use same VBO state, so we
3173 * need to make sure not to remove any inputs that are used by
3174 * the nonbinning VS.
3175 */
3176 if (ctx->compiler->gpu_id >= 600 && so->binning_pass) {
3177 debug_assert(so->type == MESA_SHADER_VERTEX);
3178 for (int i = 0; i < ir->ninputs; i++) {
3179 struct ir3_instruction *in = ir->inputs[i];
3180
3181 if (!in)
3182 continue;
3183
3184 unsigned n = i / 4;
3185 unsigned c = i % 4;
3186
3187 debug_assert(n < so->nonbinning->inputs_count);
3188
3189 if (so->nonbinning->inputs[n].sysval)
3190 continue;
3191
3192 /* be sure to keep inputs, even if only used in VS */
3193 if (so->nonbinning->inputs[n].compmask & (1 << c))
3194 array_insert(in->block, in->block->keeps, in);
3195 }
3196 }
3197
/* Insert a mov if the same instruction is used for more than one output.
 * eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
 */
3201 for (int i = ir->noutputs - 1; i >= 0; i--) {
3202 if (!ir->outputs[i])
3203 continue;
3204 for (unsigned j = 0; j < i; j++) {
3205 if (ir->outputs[i] == ir->outputs[j]) {
3206 ir->outputs[i] =
3207 ir3_MOV(ir->outputs[i]->block, ir->outputs[i], TYPE_F32);
3208 }
3209 }
3210 }
3211
3212 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3213 printf("BEFORE GROUPING:\n");
3214 ir3_print(ir);
3215 }
3216
3217 ir3_sched_add_deps(ir);
3218
3219 /* Group left/right neighbors, inserting mov's where needed to
3220 * solve conflicts:
3221 */
3222 ir3_group(ir);
3223
3224 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3225 printf("AFTER GROUPING:\n");
3226 ir3_print(ir);
3227 }
3228
3229 ir3_depth(ir, so);
3230
3231 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3232 printf("AFTER DEPTH:\n");
3233 ir3_print(ir);
3234 }
3235
3236 /* do Sethi–Ullman numbering before scheduling: */
3237 ir3_sun(ir);
3238
3239 ret = ir3_sched(ir);
3240 if (ret) {
3241 DBG("SCHED failed!");
3242 goto out;
3243 }
3244
3245 if (compiler->gpu_id >= 600) {
3246 ir3_a6xx_fixup_atomic_dests(ir, so);
3247 }
3248
3249 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3250 printf("AFTER SCHED:\n");
3251 ir3_print(ir);
3252 }
3253
3254 /* Pre-assign VS inputs on a6xx+ binning pass shader, to align
3255 * with draw pass VS, so binning and draw pass can both use the
3256 * same VBO state.
3257 *
3258 * Note that VS inputs are expected to be full precision.
3259 */
3260 bool pre_assign_inputs = (ir->compiler->gpu_id >= 600) &&
3261 (ir->type == MESA_SHADER_VERTEX) &&
3262 so->binning_pass;
3263
3264 if (pre_assign_inputs) {
3265 for (unsigned i = 0; i < ir->ninputs; i++) {
3266 struct ir3_instruction *instr = ir->inputs[i];
3267
3268 if (!instr)
3269 continue;
3270
3271 unsigned n = i / 4;
3272 unsigned c = i % 4;
3273 unsigned regid = so->nonbinning->inputs[n].regid + c;
3274
3275 instr->regs[0]->num = regid;
3276 }
3277
3278 ret = ir3_ra(so, ir->inputs, ir->ninputs);
3279 } else if (ctx->gs_header) {
3280 /* We need to have these values in the same registers between VS and GS
3281 * since the VS chains to GS and doesn't get the sysvals redelivered.
3282 */
3283
3284 ctx->gs_header->regs[0]->num = 0;
3285 ctx->primitive_id->regs[0]->num = 1;
3286 struct ir3_instruction *precolor[] = { ctx->gs_header, ctx->primitive_id };
3287 ret = ir3_ra(so, precolor, ARRAY_SIZE(precolor));
3288 } else if (so->num_sampler_prefetch) {
3289 assert(so->type == MESA_SHADER_FRAGMENT);
3290 struct ir3_instruction *precolor[2];
3291 int idx = 0;
3292
3293 for (unsigned i = 0; i < ir->ninputs; i++) {
3294 struct ir3_instruction *instr = ctx->ir->inputs[i];
3295
3296 if (!instr)
3297 continue;
3298
3299 if (instr->input.sysval != SYSTEM_VALUE_BARYCENTRIC_PIXEL)
3300 continue;
3301
3302 assert(idx < ARRAY_SIZE(precolor));
3303
3304 precolor[idx] = instr;
3305 instr->regs[0]->num = idx;
3306
3307 idx++;
3308 }
3309 ret = ir3_ra(so, precolor, idx);
3310 } else {
3311 ret = ir3_ra(so, NULL, 0);
3312 }
3313
3314 if (ret) {
3315 DBG("RA failed!");
3316 goto out;
3317 }
3318
3319 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3320 printf("AFTER RA:\n");
3321 ir3_print(ir);
3322 }
3323
3324 if (so->type == MESA_SHADER_FRAGMENT)
3325 pack_inlocs(ctx);
3326
3327 /* fixup input/outputs: */
3328 for (i = 0; i < so->outputs_count; i++) {
3329 /* sometimes we get outputs that don't write the .x coord, like:
3330 *
3331 * decl_var shader_out INTERP_MODE_NONE float Color (VARYING_SLOT_VAR9.z, 1, 0)
3332 *
* Presumably the result of varying packing and then eliminating
* some unneeded varyings? Just skip ahead to the first valid
* component of the output.
3336 */
3337 for (unsigned j = 0; j < 4; j++) {
3338 struct ir3_instruction *instr = ir->outputs[(i*4) + j];
3339 if (instr) {
3340 so->outputs[i].regid = instr->regs[0]->num;
3341 so->outputs[i].half = !!(instr->regs[0]->flags & IR3_REG_HALF);
3342 break;
3343 }
3344 }
3345 }
3346
3347 /* Note that some or all channels of an input may be unused: */
3348 for (i = 0; i < so->inputs_count; i++) {
3349 unsigned j, reg = regid(63,0);
3350 bool half = false;
3351 for (j = 0; j < 4; j++) {
3352 struct ir3_instruction *in = inputs[(i*4) + j];
3353
3354 if (!in)
3355 continue;
3356
3357 if (in->flags & IR3_INSTR_UNUSED)
3358 continue;
3359
3360 reg = in->regs[0]->num - j;
3361 if (half) {
3362 compile_assert(ctx, in->regs[0]->flags & IR3_REG_HALF);
3363 } else {
3364 half = !!(in->regs[0]->flags & IR3_REG_HALF);
3365 }
3366 }
3367 so->inputs[i].regid = reg;
3368 so->inputs[i].half = half;
3369 }
3370
3371 if (ctx->astc_srgb)
3372 fixup_astc_srgb(ctx);
3373
/* We need to do legalize after (for frag shaders) the "bary.f"
 * offsets (inloc) have been assigned:
 */
3377 ir3_legalize(ir, &so->has_ssbo, &so->need_pixlod, &max_bary);
3378
3379 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3380 printf("AFTER LEGALIZE:\n");
3381 ir3_print(ir);
3382 }
3383
3384 /* Set (ss)(sy) on first TCS and GEOMETRY instructions, since we don't
3385 * know what we might have to wait on when coming in from VS chsh.
3386 */
3387 if (so->type == MESA_SHADER_TESS_CTRL ||
3388 so->type == MESA_SHADER_GEOMETRY ) {
3389 list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
3390 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
3391 instr->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
3392 break;
3393 }
3394 }
3395 }
3396
3397 so->branchstack = ctx->max_stack;
3398
3399 /* Note that actual_in counts inputs that are not bary.f'd for FS: */
3400 if (so->type == MESA_SHADER_FRAGMENT)
3401 so->total_in = max_bary + 1;
3402
3403 so->max_sun = ir->max_sun;
3404
3405 /* Collect sampling instructions eligible for pre-dispatch. */
3406 collect_tex_prefetches(ctx, ir);
3407
3408 out:
3409 if (ret) {
3410 if (so->ir)
3411 ir3_destroy(so->ir);
3412 so->ir = NULL;
3413 }
3414 ir3_context_free(ctx);
3415
3416 return ret;
3417 }