/*
 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdarg.h>

#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_math.h"

#include "ir3_compiler.h"
#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

#include "instr-a3xx.h"
#include "ir3.h"
#include "ir3_context.h"


static struct ir3_instruction *
create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
		struct ir3_instruction *address, struct ir3_instruction *collect)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *src;

	mov = ir3_instr_create(block, OPC_MOV);
	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;
	ir3_reg_create(mov, 0, 0);
	src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV);
	src->instr = collect;
	src->size = arrsz;
	src->array.offset = n;

	ir3_instr_set_address(mov, address);

	return mov;
}

static struct ir3_instruction *
create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask)
{
	struct ir3_instruction *in;

	in = ir3_instr_create(ctx->in_block, OPC_META_INPUT);
	in->inout.block = ctx->in_block;
	ir3_reg_create(in, n, 0);

	in->regs[0]->wrmask = compmask;

	return in;
}

static struct ir3_instruction *
create_input(struct ir3_context *ctx, unsigned n)
{
	return create_input_compmask(ctx, n, 0x1);
}

static struct ir3_instruction *
create_frag_input(struct ir3_context *ctx, bool use_ldlv, unsigned n)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *instr;
	/* packed inloc is fixed up later: */
	struct ir3_instruction *inloc = create_immed(block, n);

	if (use_ldlv) {
		instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
		instr->cat6.type = TYPE_U32;
		instr->cat6.iim_val = 1;
	} else {
		instr = ir3_BARY_F(block, inloc, 0, ctx->ij_pixel, 0);
		instr->regs[2]->wrmask = 0x3;
	}

	return instr;
}

static struct ir3_instruction *
create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
{
	/* first four vec4 sysvals are reserved for UBOs: */
	/* NOTE: dp is a scalar index, but there can be >4 dp components: */
	struct ir3_const_state *const_state = &ctx->so->shader->const_state;
	unsigned n = const_state->offsets.driver_param;
	unsigned r = regid(n + dp / 4, dp % 4);
	return create_uniform(ctx->block, r);
}
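/* Illustrative example of the mapping above (values are made up, not from
 * the source): with offsets.driver_param == n, the scalar driver param
 * dp == 5 lands in const register c[n + 1].y, i.e. regid(n + 1, 1).
 */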

/*
 * Adreno uses uint rather than having a dedicated bool type,
 * which (potentially) requires some conversion, in particular
 * when using the output of a bool instr as an int input, or
 * vice versa.
 *
 *         | Adreno  |  NIR  |
 *  -------+---------+-------+-
 *   true  |    1    |  ~0   |
 *   false |    0    |   0   |
 *
 * To convert from an adreno bool (uint) to nir, use:
 *
 *    absneg.s dst, (neg)src
 *
 * To convert back in the other direction:
 *
 *    absneg.s dst, (abs)src
 *
 * The CP step can clean up the absneg.s instructions that cancel
 * each other out, and with a slight bit of extra cleverness (to
 * recognize the instructions which produce either a 0 or 1) can
 * eliminate the absneg.s's completely when an instruction that
 * wants 0/1 consumes the result. For example, when a nir 'bcsel'
 * consumes the result of 'feq'. So we should be able to get by
 * without a boolean resolve step, and without incurring any
 * extra penalty in instruction count.
 */
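/* A worked example of the table above (illustrative values, not compiler
 * output): adreno true (1) converted with "absneg.s dst, (neg)src" yields
 * -1 == ~0, the NIR true; NIR true (~0 == -1) converted back with
 * "absneg.s dst, (abs)src" yields 1; false (0) maps to 0 both ways.
 */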

/* NIR bool -> native (adreno): */
static struct ir3_instruction *
ir3_b2n(struct ir3_block *block, struct ir3_instruction *instr)
{
	return ir3_ABSNEG_S(block, instr, IR3_REG_SABS);
}

/* native (adreno) -> NIR bool: */
static struct ir3_instruction *
ir3_n2b(struct ir3_block *block, struct ir3_instruction *instr)
{
	return ir3_ABSNEG_S(block, instr, IR3_REG_SNEG);
}

/*
 * alu/sfu instructions:
 */

static struct ir3_instruction *
create_cov(struct ir3_context *ctx, struct ir3_instruction *src,
		unsigned src_bitsize, nir_op op)
{
	type_t src_type, dst_type;

	switch (op) {
	case nir_op_f2f32:
	case nir_op_f2f16_rtne:
	case nir_op_f2f16_rtz:
	case nir_op_f2f16:
	case nir_op_f2i32:
	case nir_op_f2i16:
	case nir_op_f2i8:
	case nir_op_f2u32:
	case nir_op_f2u16:
	case nir_op_f2u8:
		switch (src_bitsize) {
		case 32:
			src_type = TYPE_F32;
			break;
		case 16:
			src_type = TYPE_F16;
			break;
		default:
			ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
		}
		break;

	case nir_op_i2f32:
	case nir_op_i2f16:
	case nir_op_i2i32:
	case nir_op_i2i16:
	case nir_op_i2i8:
		switch (src_bitsize) {
		case 32:
			src_type = TYPE_S32;
			break;
		case 16:
			src_type = TYPE_S16;
			break;
		case 8:
			src_type = TYPE_S8;
			break;
		default:
			ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
		}
		break;

	case nir_op_u2f32:
	case nir_op_u2f16:
	case nir_op_u2u32:
	case nir_op_u2u16:
	case nir_op_u2u8:
		switch (src_bitsize) {
		case 32:
			src_type = TYPE_U32;
			break;
		case 16:
			src_type = TYPE_U16;
			break;
		case 8:
			src_type = TYPE_U8;
			break;
		default:
			ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
		}
		break;

	default:
		ir3_context_error(ctx, "invalid conversion op: %u", op);
	}

	switch (op) {
	case nir_op_f2f32:
	case nir_op_i2f32:
	case nir_op_u2f32:
		dst_type = TYPE_F32;
		break;

	case nir_op_f2f16_rtne:
	case nir_op_f2f16_rtz:
	case nir_op_f2f16:
		/* TODO how to handle rounding mode? */
	case nir_op_i2f16:
	case nir_op_u2f16:
		dst_type = TYPE_F16;
		break;

	case nir_op_f2i32:
	case nir_op_i2i32:
		dst_type = TYPE_S32;
		break;

	case nir_op_f2i16:
	case nir_op_i2i16:
		dst_type = TYPE_S16;
		break;

	case nir_op_f2i8:
	case nir_op_i2i8:
		dst_type = TYPE_S8;
		break;

	case nir_op_f2u32:
	case nir_op_u2u32:
		dst_type = TYPE_U32;
		break;

	case nir_op_f2u16:
	case nir_op_u2u16:
		dst_type = TYPE_U16;
		break;

	case nir_op_f2u8:
	case nir_op_u2u8:
		dst_type = TYPE_U8;
		break;

	default:
		ir3_context_error(ctx, "invalid conversion op: %u", op);
	}

	return ir3_COV(ctx->block, src, src_type, dst_type);
}

static void
emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
{
	const nir_op_info *info = &nir_op_infos[alu->op];
	struct ir3_instruction **dst, *src[info->num_inputs];
	unsigned bs[info->num_inputs];     /* bit size */
	struct ir3_block *b = ctx->block;
	unsigned dst_sz, wrmask;
	type_t dst_type = nir_dest_bit_size(alu->dest.dest) < 32 ?
			TYPE_U16 : TYPE_U32;

	if (alu->dest.dest.is_ssa) {
		dst_sz = alu->dest.dest.ssa.num_components;
		wrmask = (1 << dst_sz) - 1;
	} else {
		dst_sz = alu->dest.dest.reg.reg->num_components;
		wrmask = alu->dest.write_mask;
	}

	dst = ir3_get_dst(ctx, &alu->dest.dest, dst_sz);

	/* Vectors are special in that they have non-scalarized writemasks,
	 * and just take the first swizzle channel for each argument in
	 * order into each writemask channel.
	 */
	if ((alu->op == nir_op_vec2) ||
			(alu->op == nir_op_vec3) ||
			(alu->op == nir_op_vec4)) {

		for (int i = 0; i < info->num_inputs; i++) {
			nir_alu_src *asrc = &alu->src[i];

			compile_assert(ctx, !asrc->abs);
			compile_assert(ctx, !asrc->negate);

			src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[0]];
			if (!src[i])
				src[i] = create_immed_typed(ctx->block, 0, dst_type);
			dst[i] = ir3_MOV(b, src[i], dst_type);
		}

		ir3_put_dst(ctx, &alu->dest.dest);
		return;
	}

	/* We also get mov's with more than one component, so handle
	 * those specially:
	 */
	if (alu->op == nir_op_mov) {
		nir_alu_src *asrc = &alu->src[0];
		struct ir3_instruction *const *src0 = ir3_get_src(ctx, &asrc->src);

		for (unsigned i = 0; i < dst_sz; i++) {
			if (wrmask & (1 << i)) {
				dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], dst_type);
			} else {
				dst[i] = NULL;
			}
		}

		ir3_put_dst(ctx, &alu->dest.dest);
		return;
	}

	/* General case: We can just grab the one used channel per src. */
	for (int i = 0; i < info->num_inputs; i++) {
		unsigned chan = ffs(alu->dest.write_mask) - 1;
		nir_alu_src *asrc = &alu->src[i];

		compile_assert(ctx, !asrc->abs);
		compile_assert(ctx, !asrc->negate);

		src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[chan]];
		bs[i] = nir_src_bit_size(asrc->src);

		compile_assert(ctx, src[i]);
	}

	switch (alu->op) {
	case nir_op_f2f32:
	case nir_op_f2f16_rtne:
	case nir_op_f2f16_rtz:
	case nir_op_f2f16:
	case nir_op_f2i32:
	case nir_op_f2i16:
	case nir_op_f2i8:
	case nir_op_f2u32:
	case nir_op_f2u16:
	case nir_op_f2u8:
	case nir_op_i2f32:
	case nir_op_i2f16:
	case nir_op_i2i32:
	case nir_op_i2i16:
	case nir_op_i2i8:
	case nir_op_u2f32:
	case nir_op_u2f16:
	case nir_op_u2u32:
	case nir_op_u2u16:
	case nir_op_u2u8:
		dst[0] = create_cov(ctx, src[0], bs[0], alu->op);
		break;
	case nir_op_f2b32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_b2f16:
		dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F16);
		break;
	case nir_op_b2f32:
		dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
		break;
	case nir_op_b2i8:
	case nir_op_b2i16:
	case nir_op_b2i32:
		dst[0] = ir3_b2n(b, src[0]);
		break;
	case nir_op_i2b32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;

	case nir_op_fneg:
		dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FNEG);
		break;
	case nir_op_fabs:
		dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FABS);
		break;
	case nir_op_fmax:
		dst[0] = ir3_MAX_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fmin:
		dst[0] = ir3_MIN_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fsat:
		/* if there is just a single use of the src, and it supports
		 * the (sat) bit, we can just fold the (sat) flag back to the
		 * src instruction and create a mov. This is easier for cp
		 * to eliminate.
		 *
		 * TODO probably opc_cat==4 is ok too
		 */
		if (alu->src[0].src.is_ssa &&
				(list_length(&alu->src[0].src.ssa->uses) == 1) &&
				((opc_cat(src[0]->opc) == 2) || (opc_cat(src[0]->opc) == 3))) {
			src[0]->flags |= IR3_INSTR_SAT;
			dst[0] = ir3_MOV(b, src[0], dst_type);
		} else {
			/* otherwise generate a max.f that saturates.. blob does
			 * similar (generating a cat2 mov using max.f)
			 */
			dst[0] = ir3_MAX_F(b, src[0], 0, src[0], 0);
			dst[0]->flags |= IR3_INSTR_SAT;
		}
		break;
	case nir_op_fmul:
		dst[0] = ir3_MUL_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fadd:
		dst[0] = ir3_ADD_F(b, src[0], 0, src[1], 0);
		break;
	case nir_op_fsub:
		dst[0] = ir3_ADD_F(b, src[0], 0, src[1], IR3_REG_FNEG);
		break;
	case nir_op_ffma:
		dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0);
		break;
	case nir_op_fddx:
		dst[0] = ir3_DSX(b, src[0], 0);
		dst[0]->cat5.type = TYPE_F32;
		break;
	case nir_op_fddy:
		dst[0] = ir3_DSY(b, src[0], 0);
		dst[0]->cat5.type = TYPE_F32;
		break;
	case nir_op_flt32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_LT;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_fge32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_feq32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_EQ;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_fne32:
		dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_fceil:
		dst[0] = ir3_CEIL_F(b, src[0], 0);
		break;
	case nir_op_ffloor:
		dst[0] = ir3_FLOOR_F(b, src[0], 0);
		break;
	case nir_op_ftrunc:
		dst[0] = ir3_TRUNC_F(b, src[0], 0);
		break;
	case nir_op_fround_even:
		dst[0] = ir3_RNDNE_F(b, src[0], 0);
		break;
	case nir_op_fsign:
		dst[0] = ir3_SIGN_F(b, src[0], 0);
		break;

	case nir_op_fsin:
		dst[0] = ir3_SIN(b, src[0], 0);
		break;
	case nir_op_fcos:
		dst[0] = ir3_COS(b, src[0], 0);
		break;
	case nir_op_frsq:
		dst[0] = ir3_RSQ(b, src[0], 0);
		break;
	case nir_op_frcp:
		dst[0] = ir3_RCP(b, src[0], 0);
		break;
	case nir_op_flog2:
		dst[0] = ir3_LOG2(b, src[0], 0);
		break;
	case nir_op_fexp2:
		dst[0] = ir3_EXP2(b, src[0], 0);
		break;
	case nir_op_fsqrt:
		dst[0] = ir3_SQRT(b, src[0], 0);
		break;

	case nir_op_iabs:
		dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SABS);
		break;
	case nir_op_iadd:
		dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_iand:
		dst[0] = ir3_AND_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_imax:
		dst[0] = ir3_MAX_S(b, src[0], 0, src[1], 0);
		break;
	case nir_op_umax:
		dst[0] = ir3_MAX_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_imin:
		dst[0] = ir3_MIN_S(b, src[0], 0, src[1], 0);
		break;
	case nir_op_umin:
		dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_umul_low:
		dst[0] = ir3_MULL_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_imadsh_mix16:
		dst[0] = ir3_MADSH_M16(b, src[0], 0, src[1], 0, src[2], 0);
		break;
	case nir_op_ineg:
		dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
		break;
	case nir_op_inot:
		dst[0] = ir3_NOT_B(b, src[0], 0);
		break;
	case nir_op_ior:
		dst[0] = ir3_OR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ishl:
		dst[0] = ir3_SHL_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ishr:
		dst[0] = ir3_ASHR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_isub:
		dst[0] = ir3_SUB_U(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ixor:
		dst[0] = ir3_XOR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ushr:
		dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0);
		break;
	case nir_op_ilt32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_LT;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ige32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ieq32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_EQ;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ine32:
		dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_NE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_ult32:
		dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_LT;
		dst[0] = ir3_n2b(b, dst[0]);
		break;
	case nir_op_uge32:
		dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
		dst[0]->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_n2b(b, dst[0]);
		break;

	case nir_op_b32csel: {
		struct ir3_instruction *cond = ir3_b2n(b, src[0]);
		compile_assert(ctx, bs[1] == bs[2]);
		/* the boolean condition is 32b even if src[1] and src[2] are
		 * half-precision, but sel.b16 wants all three src's to be the
		 * same type.
		 */
		if (bs[1] < 32)
			cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16);
		dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
		break;
	}
	case nir_op_bit_count: {
		// TODO, we need to do this 16b at a time on a5xx+a6xx.. need to
		// double check on earlier gen's. Once half-precision support is
		// in place, this should probably move to a NIR lowering pass:
		struct ir3_instruction *hi, *lo;

		hi = ir3_COV(b, ir3_SHR_B(b, src[0], 0, create_immed(b, 16), 0),
				TYPE_U32, TYPE_U16);
		lo = ir3_COV(b, src[0], TYPE_U32, TYPE_U16);

		hi = ir3_CBITS_B(b, hi, 0);
		lo = ir3_CBITS_B(b, lo, 0);
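		/* Illustrative: for src == 0xf000000f each 16b half
		 * contributes a count of 4, and the add below yields 8.
		 */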

		// TODO maybe the builders should default to making dst half-precision
		// if the src's were half precision, to make this less awkward.. otoh
		// we should probably just do this lowering in NIR.
		hi->regs[0]->flags |= IR3_REG_HALF;
		lo->regs[0]->flags |= IR3_REG_HALF;

		dst[0] = ir3_ADD_S(b, hi, 0, lo, 0);
		dst[0]->regs[0]->flags |= IR3_REG_HALF;
		dst[0] = ir3_COV(b, dst[0], TYPE_U16, TYPE_U32);
		break;
	}
	case nir_op_ifind_msb: {
		struct ir3_instruction *cmp;
		dst[0] = ir3_CLZ_S(b, src[0], 0);
		cmp = ir3_CMPS_S(b, dst[0], 0, create_immed(b, 0), 0);
		cmp->cat2.condition = IR3_COND_GE;
		dst[0] = ir3_SEL_B32(b,
				ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
				cmp, 0, dst[0], 0);
		break;
	}
	case nir_op_ufind_msb:
		dst[0] = ir3_CLZ_B(b, src[0], 0);
		dst[0] = ir3_SEL_B32(b,
				ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
				src[0], 0, dst[0], 0);
		break;
	case nir_op_find_lsb:
		dst[0] = ir3_BFREV_B(b, src[0], 0);
		dst[0] = ir3_CLZ_B(b, dst[0], 0);
		break;
	case nir_op_bitfield_reverse:
		dst[0] = ir3_BFREV_B(b, src[0], 0);
		break;

	default:
		ir3_context_error(ctx, "Unhandled ALU op: %s\n",
				nir_op_infos[alu->op].name);
		break;
	}

	ir3_put_dst(ctx, &alu->dest.dest);
}

/* handles direct/indirect UBO reads: */
static void
emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
	/* UBO addresses are the first driver params, but subtract 2 here to
	 * account for nir_lower_uniforms_to_ubo rebasing the UBOs such that
	 * UBO 0 is the uniforms:
	 */
	struct ir3_const_state *const_state = &ctx->so->shader->const_state;
	unsigned ubo = regid(const_state->offsets.ubo, 0) - 2;
	const unsigned ptrsz = ir3_pointer_size(ctx->compiler);

	int off = 0;

	/* First src is ubo index, which could either be an immed or not: */
	src0 = ir3_get_src(ctx, &intr->src[0])[0];
	if (is_same_type_mov(src0) &&
			(src0->regs[1]->flags & IR3_REG_IMMED)) {
		base_lo = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz));
		base_hi = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
	} else {
		base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, ptrsz));
		base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, ptrsz));

		/* NOTE: since relative addressing is used, make sure constlen is
		 * at least big enough to cover all the UBO addresses, since the
		 * assembler won't know what the max address reg is.
		 */
		ctx->so->constlen = MAX2(ctx->so->constlen,
				const_state->offsets.ubo + (ctx->s->info.num_ubos * ptrsz));
	}

	/* note: on 32bit gpus base_hi is ignored and DCE'd */
	addr = base_lo;

	if (nir_src_is_const(intr->src[1])) {
		off += nir_src_as_uint(intr->src[1]);
	} else {
		/* For load_ubo_indirect, second src is indirect offset: */
		src1 = ir3_get_src(ctx, &intr->src[1])[0];

		/* and add offset to addr: */
		addr = ir3_ADD_S(b, addr, 0, src1, 0);
	}

	/* if offset is too large to encode in the ldg, split it out: */
	if ((off + (intr->num_components * 4)) > 1024) {
		/* split out the minimal amount to improve the odds that
		 * cp can fit the immediate in the add.s instruction:
		 */
		unsigned off2 = off + (intr->num_components * 4) - 1024;
		addr = ir3_ADD_S(b, addr, 0, create_immed(b, off2), 0);
		off -= off2;
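		/* E.g. (illustrative numbers): off == 1040 with four
		 * components gives off2 == 1040 + 16 - 1024 == 32, so the
		 * add.s gets a small immediate and off drops to 1008, which
		 * the ldg can encode.
		 */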
	}

	if (ptrsz == 2) {
		struct ir3_instruction *carry;

		/* handle 32b rollover, ie:
		 *   if (addr < base_lo)
		 *      base_hi++
		 */
		carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
		carry->cat2.condition = IR3_COND_LT;
		base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);

		addr = ir3_create_collect(ctx, (struct ir3_instruction*[]){ addr, base_hi }, 2);
	}

	for (int i = 0; i < intr->num_components; i++) {
		struct ir3_instruction *load =
				ir3_LDG(b, addr, 0, create_immed(b, 1), 0);
		load->cat6.type = TYPE_U32;
		load->cat6.src_offset = off + i * 4;    /* byte offset */
		dst[i] = load;
	}
}

/* src[] = { block_index } */
static void
emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	/* SSBO size stored as a const starting at ssbo_sizes: */
	struct ir3_const_state *const_state = &ctx->so->shader->const_state;
	unsigned blk_idx = nir_src_as_uint(intr->src[0]);
	unsigned idx = regid(const_state->offsets.ssbo_sizes, 0) +
			const_state->ssbo_size.off[blk_idx];

	debug_assert(const_state->ssbo_size.mask & (1 << blk_idx));

	dst[0] = create_uniform(ctx->block, idx);
}

/* src[] = { offset }. const_index[] = { base } */
static void
emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *ldl, *offset;
	unsigned base;

	offset = ir3_get_src(ctx, &intr->src[0])[0];
	base   = nir_intrinsic_base(intr);

	ldl = ir3_LDL(b, offset, 0, create_immed(b, intr->num_components), 0);
	ldl->cat6.src_offset = base;
	ldl->cat6.type = utype_dst(intr->dest);
	ldl->regs[0]->wrmask = MASK(intr->num_components);

	ldl->barrier_class = IR3_BARRIER_SHARED_R;
	ldl->barrier_conflict = IR3_BARRIER_SHARED_W;

	ir3_split_dest(b, dst, ldl, 0, intr->num_components);
}

/* src[] = { value, offset }. const_index[] = { base, write_mask } */
static void
emit_intrinsic_store_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *stl, *offset;
	struct ir3_instruction * const *value;
	unsigned base, wrmask;

	value  = ir3_get_src(ctx, &intr->src[0]);
	offset = ir3_get_src(ctx, &intr->src[1])[0];

	base   = nir_intrinsic_base(intr);
	wrmask = nir_intrinsic_write_mask(intr);

	/* Combine groups of consecutive enabled channels in one write
	 * message. We use ffs to find the first enabled channel and then ffs
	 * on the bit-inverse, down-shifted writemask to determine the length
	 * of the block of enabled bits.
	 *
	 * (trick stolen from i965's fs_visitor::nir_emit_cs_intrinsic())
	 */
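	/* E.g. (illustrative) for wrmask 0b1011: the first iteration writes
	 * a block of two channels starting at 0 (ffs(0b1011)-1 == 0,
	 * ffs(~0b1011)-1 == 2), the mask then becomes 0b1000, and a second
	 * iteration writes the single remaining channel.
	 */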
	while (wrmask) {
		unsigned first_component = ffs(wrmask) - 1;
		unsigned length = ffs(~(wrmask >> first_component)) - 1;

		stl = ir3_STL(b, offset, 0,
			ir3_create_collect(ctx, &value[first_component], length), 0,
			create_immed(b, length), 0);
		stl->cat6.dst_offset = first_component + base;
		stl->cat6.type = utype_src(intr->src[0]);
		stl->barrier_class = IR3_BARRIER_SHARED_W;
		stl->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;

		array_insert(b, b->keeps, stl);

		/* Clear the bits in the writemask that we just wrote, then try
		 * again to see if more channels are left.
		 */
		wrmask &= (15 << (first_component + length));
	}
}

/*
 * CS shared variable atomic intrinsics
 *
 * All of the shared variable atomic memory operations read a value from
 * memory, compute a new value using one of the operations below, write the
 * new value to memory, and return the original value read.
 *
 * All operations take 2 sources except CompSwap that takes 3. These
 * sources represent:
 *
 * 0: The offset into the shared variable storage region that the atomic
 *    operation will operate on.
 * 1: The data parameter to the atomic function (i.e. the value to add
 *    in shared_atomic_add, etc).
 * 2: For CompSwap only: the second data parameter.
 */
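/* So, for example (illustrative GLSL), atomicAdd(shared_var, 1) arrives as
 * nir_intrinsic_shared_atomic_add with src[0] holding the offset of
 * shared_var within the shared storage region and src[1] the value 1.
 */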
static struct ir3_instruction *
emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *atomic, *src0, *src1;
	type_t type = TYPE_U32;

	src0 = ir3_get_src(ctx, &intr->src[0])[0];   /* offset */
	src1 = ir3_get_src(ctx, &intr->src[1])[0];   /* value */

	switch (intr->intrinsic) {
	case nir_intrinsic_shared_atomic_add:
		atomic = ir3_ATOMIC_ADD(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_imin:
		atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_shared_atomic_umin:
		atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_imax:
		atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_shared_atomic_umax:
		atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_and:
		atomic = ir3_ATOMIC_AND(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_or:
		atomic = ir3_ATOMIC_OR(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_xor:
		atomic = ir3_ATOMIC_XOR(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_exchange:
		atomic = ir3_ATOMIC_XCHG(b, src0, 0, src1, 0);
		break;
	case nir_intrinsic_shared_atomic_comp_swap:
		/* for cmpxchg, src1 is [ui]vec2(data, compare): */
		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
			ir3_get_src(ctx, &intr->src[2])[0],
			src1,
		}, 2);
		atomic = ir3_ATOMIC_CMPXCHG(b, src0, 0, src1, 0);
		break;
	default:
		unreachable("boo");
	}

	atomic->cat6.iim_val = 1;
	atomic->cat6.d = 1;
	atomic->cat6.type = type;
	atomic->barrier_class = IR3_BARRIER_SHARED_W;
	atomic->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;

	/* even if nothing consumes the result, we can't DCE the instruction: */
	array_insert(b, b->keeps, atomic);

	return atomic;
}

/* TODO handle actual indirect/dynamic case.. which is going to be weird
 * to handle with the image_mapping table..
 */
static struct ir3_instruction *
get_image_samp_tex_src(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
	unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot);
	struct ir3_instruction *texture, *sampler;

	texture = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
	sampler = create_immed_typed(ctx->block, tex_idx, TYPE_U16);

	return ir3_create_collect(ctx, (struct ir3_instruction*[]){
		sampler,
		texture,
	}, 2);
}

/* src[] = { deref, coord, sample_index }. const_index[] = {} */
static void
emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
	struct ir3_instruction *sam;
	struct ir3_instruction * const *src0 = ir3_get_src(ctx, &intr->src[1]);
	struct ir3_instruction *coords[4];
	unsigned flags, ncoords = ir3_get_image_coords(var, &flags);
	type_t type = ir3_get_image_type(var);

	/* hmm, this seems a bit odd, but it is what blob does and (at least
	 * on a5xx) it just faults on bogus addresses otherwise:
	 */
	if (flags & IR3_INSTR_3D) {
		flags &= ~IR3_INSTR_3D;
		flags |= IR3_INSTR_A;
	}

	for (unsigned i = 0; i < ncoords; i++)
		coords[i] = src0[i];

	if (ncoords == 1)
		coords[ncoords++] = create_immed(b, 0);

	sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags,
			samp_tex, ir3_create_collect(ctx, coords, ncoords), NULL);

	sam->barrier_class = IR3_BARRIER_IMAGE_R;
	sam->barrier_conflict = IR3_BARRIER_IMAGE_W;

	ir3_split_dest(b, dst, sam, 0, 4);
}

static void
emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	const nir_variable *var = nir_intrinsic_get_var(intr, 0);
	struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
	struct ir3_instruction *sam, *lod;
	unsigned flags, ncoords = ir3_get_image_coords(var, &flags);

	lod = create_immed(b, 0);
	sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
			samp_tex, lod, NULL);

	/* Array size actually ends up in .w rather than .z. This doesn't
	 * matter for miplevel 0, but for higher mips the value in z is
	 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
	 * returned, which means that we have to add 1 to it for arrays for
	 * a3xx.
	 *
	 * Note: use a temporary dst and then copy, since the size of the dst
	 * array that is passed in is based on nir's understanding of the
	 * result size, not the hardware's.
	 */
	struct ir3_instruction *tmp[4];

	ir3_split_dest(b, tmp, sam, 0, 4);

	/* get_size instruction returns size in bytes instead of texels
	 * for imageBuffer, so we need to divide it by the pixel size
	 * of the image format.
	 *
	 * TODO: This is at least true on a5xx. Check other gens.
	 */
	enum glsl_sampler_dim dim =
		glsl_get_sampler_dim(glsl_without_array(var->type));
	if (dim == GLSL_SAMPLER_DIM_BUF) {
		/* Since all the possible values the divisor can take are
		 * power-of-two (4, 8, or 16), the division is implemented
		 * as a shift-right.
		 * During shader setup, the log2 of the image format's
		 * bytes-per-pixel should have been emitted in the 2nd slot of
		 * image_dims. See ir3_shader::emit_image_dims().
		 */
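		/* E.g. (illustrative): an RGBA8 buffer image would store
		 * log2(4) == 2 there, so a 4096-byte buffer reports
		 * 4096 >> 2 == 1024 texels.
		 */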
		struct ir3_const_state *const_state = &ctx->so->shader->const_state;
		unsigned cb = regid(const_state->offsets.image_dims, 0) +
			const_state->image_dims.off[var->data.driver_location];
		struct ir3_instruction *aux = create_uniform(b, cb + 1);

		tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0);
	}

	for (unsigned i = 0; i < ncoords; i++)
		dst[i] = tmp[i];

	if (flags & IR3_INSTR_A) {
		if (ctx->compiler->levels_add_one) {
			dst[ncoords-1] = ir3_ADD_U(b, tmp[3], 0, create_immed(b, 1), 0);
		} else {
			dst[ncoords-1] = ir3_MOV(b, tmp[3], TYPE_U32);
		}
	}
}

static void
emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *barrier;

	switch (intr->intrinsic) {
	case nir_intrinsic_barrier:
		barrier = ir3_BAR(b);
		barrier->cat7.g = true;
		barrier->cat7.l = true;
		barrier->flags = IR3_INSTR_SS | IR3_INSTR_SY;
		barrier->barrier_class = IR3_BARRIER_EVERYTHING;
		break;
	case nir_intrinsic_memory_barrier:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->cat7.l = true;
		barrier->barrier_class = IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_W;
		barrier->barrier_conflict =
			IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
		break;
	case nir_intrinsic_memory_barrier_atomic_counter:
	case nir_intrinsic_memory_barrier_buffer:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_BUFFER_W;
		barrier->barrier_conflict = IR3_BARRIER_BUFFER_R |
			IR3_BARRIER_BUFFER_W;
		break;
	case nir_intrinsic_memory_barrier_image:
		// TODO double check if this should have .g set
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_IMAGE_W;
		barrier->barrier_conflict = IR3_BARRIER_IMAGE_R |
			IR3_BARRIER_IMAGE_W;
		break;
	case nir_intrinsic_memory_barrier_shared:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.l = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_SHARED_W;
		barrier->barrier_conflict = IR3_BARRIER_SHARED_R |
			IR3_BARRIER_SHARED_W;
		break;
	case nir_intrinsic_group_memory_barrier:
		barrier = ir3_FENCE(b);
		barrier->cat7.g = true;
		barrier->cat7.l = true;
		barrier->cat7.r = true;
		barrier->cat7.w = true;
		barrier->barrier_class = IR3_BARRIER_SHARED_W |
			IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_W;
		barrier->barrier_conflict =
			IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W |
			IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
			IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
		break;
	default:
		unreachable("boo");
	}

	/* make sure barrier doesn't get DCE'd */
	array_insert(b, b->keeps, barrier);
}

static void add_sysval_input_compmask(struct ir3_context *ctx,
		gl_system_value slot, unsigned compmask,
		struct ir3_instruction *instr)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned r = regid(so->inputs_count, 0);
	unsigned n = so->inputs_count++;

	so->inputs[n].sysval = true;
	so->inputs[n].slot = slot;
	so->inputs[n].compmask = compmask;
	so->inputs[n].regid = r;
	so->inputs[n].interpolate = INTERP_MODE_FLAT;
	so->total_in++;

	ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1);
	ctx->ir->inputs[r] = instr;
}

static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot,
		struct ir3_instruction *instr)
{
	add_sysval_input_compmask(ctx, slot, 0x1, instr);
}

static struct ir3_instruction *
get_barycentric_centroid(struct ir3_context *ctx)
{
	if (!ctx->ij_centroid) {
		struct ir3_instruction *xy[2];
		struct ir3_instruction *ij;

		ij = create_input_compmask(ctx, 0, 0x3);
		ir3_split_dest(ctx->block, xy, ij, 0, 2);

		ctx->ij_centroid = ir3_create_collect(ctx, xy, 2);

		add_sysval_input_compmask(ctx,
				SYSTEM_VALUE_BARYCENTRIC_CENTROID,
				0x3, ij);
	}

	return ctx->ij_centroid;
}

static struct ir3_instruction *
get_barycentric_sample(struct ir3_context *ctx)
{
	if (!ctx->ij_sample) {
		struct ir3_instruction *xy[2];
		struct ir3_instruction *ij;

		ij = create_input_compmask(ctx, 0, 0x3);
		ir3_split_dest(ctx->block, xy, ij, 0, 2);

		ctx->ij_sample = ir3_create_collect(ctx, xy, 2);

		add_sysval_input_compmask(ctx,
				SYSTEM_VALUE_BARYCENTRIC_SAMPLE,
				0x3, ij);
	}

	return ctx->ij_sample;
}

static struct ir3_instruction *
get_barycentric_pixel(struct ir3_context *ctx)
{
	/* TODO when tgsi_to_nir supports "new-style" FS inputs switch
	 * this to create ij_pixel only on demand:
	 */
	return ctx->ij_pixel;
}

static struct ir3_instruction *
get_frag_coord(struct ir3_context *ctx)
{
	if (!ctx->frag_coord) {
		struct ir3_block *b = ctx->block;
		struct ir3_instruction *xyzw[4];
		struct ir3_instruction *hw_frag_coord;

		hw_frag_coord = create_input_compmask(ctx, 0, 0xf);
		ir3_split_dest(ctx->block, xyzw, hw_frag_coord, 0, 4);

		/* for frag_coord.xy, we get unsigned values; we need to
		 * subtract (integer) 8 and divide by 16 (right-shift by 4),
		 * then convert to float:
		 *
		 *    sub.s tmp, src, 8
		 *    shr.b tmp, tmp, 4
		 *    mov.u32f32 dst, tmp
		 */
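		/* Illustrative arithmetic only: an incoming value of 40
		 * becomes (40 - 8) >> 4 == 2, i.e. 2.0f after the cov.
		 */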
		for (int i = 0; i < 2; i++) {
			xyzw[i] = ir3_SUB_S(b, xyzw[i], 0,
					create_immed(b, 8), 0);
			xyzw[i] = ir3_SHR_B(b, xyzw[i], 0,
					create_immed(b, 4), 0);
			xyzw[i] = ir3_COV(b, xyzw[i], TYPE_U32, TYPE_F32);
		}

		ctx->frag_coord = ir3_create_collect(ctx, xyzw, 4);

		add_sysval_input_compmask(ctx,
				SYSTEM_VALUE_FRAG_COORD,
				0xf, hw_frag_coord);

		ctx->so->frag_coord = true;
	}

	return ctx->frag_coord;
}

static void
emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
	struct ir3_instruction **dst;
	struct ir3_instruction * const *src;
	struct ir3_block *b = ctx->block;
	int idx, comp;

	if (info->has_dest) {
		unsigned n = nir_intrinsic_dest_components(intr);
		dst = ir3_get_dst(ctx, &intr->dest, n);
	} else {
		dst = NULL;
	}

	switch (intr->intrinsic) {
	case nir_intrinsic_load_uniform:
		idx = nir_intrinsic_base(intr);
		if (nir_src_is_const(intr->src[0])) {
			idx += nir_src_as_uint(intr->src[0]);
			for (int i = 0; i < intr->num_components; i++) {
				dst[i] = create_uniform_typed(b, idx + i,
					nir_dest_bit_size(intr->dest) < 32 ? TYPE_F16 : TYPE_F32);
			}
		} else {
			src = ir3_get_src(ctx, &intr->src[0]);
			for (int i = 0; i < intr->num_components; i++) {
				dst[i] = create_uniform_indirect(b, idx + i,
						ir3_get_addr(ctx, src[0], 1));
			}
			/* NOTE: if relative addressing is used, we set
			 * constlen in the compiler (to worst-case value)
			 * since we don't know in the assembler what the max
			 * addr reg value can be:
			 */
			ctx->so->constlen = MAX2(ctx->so->constlen,
					ctx->so->shader->ubo_state.size / 16);
		}
		break;
	case nir_intrinsic_load_ubo:
		emit_intrinsic_load_ubo(ctx, intr, dst);
		break;
	case nir_intrinsic_load_frag_coord:
		ir3_split_dest(b, dst, get_frag_coord(ctx), 0, 4);
		break;
	case nir_intrinsic_load_sample_pos_from_id: {
		/* NOTE: blob seems to always use TYPE_F16 and then cov.f16f32,
		 * but that doesn't seem necessary.
		 */
		struct ir3_instruction *offset =
			ir3_RGETPOS(b, ir3_get_src(ctx, &intr->src[0])[0], 0);
		offset->regs[0]->wrmask = 0x3;
		offset->cat5.type = TYPE_F32;

		ir3_split_dest(b, dst, offset, 0, 2);

		break;
	}
	case nir_intrinsic_load_size_ir3:
		if (!ctx->ij_size) {
			ctx->ij_size = create_input(ctx, 0);

			add_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SIZE,
					ctx->ij_size);
		}
		dst[0] = ctx->ij_size;
		break;
	case nir_intrinsic_load_barycentric_centroid:
		ir3_split_dest(b, dst, get_barycentric_centroid(ctx), 0, 2);
		break;
	case nir_intrinsic_load_barycentric_sample:
		if (ctx->so->key.msaa) {
			ir3_split_dest(b, dst, get_barycentric_sample(ctx), 0, 2);
		} else {
			ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2);
		}
		break;
	case nir_intrinsic_load_barycentric_pixel:
		ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2);
		break;
	case nir_intrinsic_load_interpolated_input:
		idx = nir_intrinsic_base(intr);
		comp = nir_intrinsic_component(intr);
		src = ir3_get_src(ctx, &intr->src[0]);
		if (nir_src_is_const(intr->src[1])) {
			struct ir3_instruction *coord = ir3_create_collect(ctx, src, 2);
			idx += nir_src_as_uint(intr->src[1]);
			for (int i = 0; i < intr->num_components; i++) {
				unsigned inloc = idx * 4 + i + comp;
				if (ctx->so->inputs[idx].bary &&
						!ctx->so->inputs[idx].use_ldlv) {
					dst[i] = ir3_BARY_F(b, create_immed(b, inloc), 0, coord, 0);
				} else {
					/* for non-varyings use the pre-setup input, since
					 * that is easier than mapping things back to a
					 * nir_variable to figure out what it is.
					 */
					dst[i] = ctx->ir->inputs[inloc];
				}
			}
		} else {
			ir3_context_error(ctx, "unhandled");
		}
		break;
	case nir_intrinsic_load_input:
		idx = nir_intrinsic_base(intr);
		comp = nir_intrinsic_component(intr);
		if (nir_src_is_const(intr->src[0])) {
			idx += nir_src_as_uint(intr->src[0]);
			for (int i = 0; i < intr->num_components; i++) {
				unsigned n = idx * 4 + i + comp;
				dst[i] = ctx->ir->inputs[n];
				compile_assert(ctx, ctx->ir->inputs[n]);
			}
		} else {
			src = ir3_get_src(ctx, &intr->src[0]);
			struct ir3_instruction *collect =
					ir3_create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs);
			struct ir3_instruction *addr = ir3_get_addr(ctx, src[0], 4);
			for (int i = 0; i < intr->num_components; i++) {
				unsigned n = idx * 4 + i + comp;
				dst[i] = create_indirect_load(ctx, ctx->ir->ninputs,
						n, addr, collect);
			}
		}
		break;
	/* All SSBO intrinsics should have been lowered by 'lower_io_offsets'
	 * pass and replaced by an ir3-specific version that adds the
	 * dword-offset in the last source.
	 */
	case nir_intrinsic_load_ssbo_ir3:
		ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
		break;
	case nir_intrinsic_store_ssbo_ir3:
		if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
				!ctx->s->info.fs.early_fragment_tests)
			ctx->so->no_earlyz = true;
		ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
		break;
	case nir_intrinsic_get_buffer_size:
		emit_intrinsic_ssbo_size(ctx, intr, dst);
		break;
	case nir_intrinsic_ssbo_atomic_add_ir3:
	case nir_intrinsic_ssbo_atomic_imin_ir3:
	case nir_intrinsic_ssbo_atomic_umin_ir3:
	case nir_intrinsic_ssbo_atomic_imax_ir3:
	case nir_intrinsic_ssbo_atomic_umax_ir3:
	case nir_intrinsic_ssbo_atomic_and_ir3:
	case nir_intrinsic_ssbo_atomic_or_ir3:
	case nir_intrinsic_ssbo_atomic_xor_ir3:
	case nir_intrinsic_ssbo_atomic_exchange_ir3:
	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
		if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
				!ctx->s->info.fs.early_fragment_tests)
			ctx->so->no_earlyz = true;
		dst[0] = ctx->funcs->emit_intrinsic_atomic_ssbo(ctx, intr);
		break;
	case nir_intrinsic_load_shared:
		emit_intrinsic_load_shared(ctx, intr, dst);
		break;
	case nir_intrinsic_store_shared:
		emit_intrinsic_store_shared(ctx, intr);
		break;
	case nir_intrinsic_shared_atomic_add:
	case nir_intrinsic_shared_atomic_imin:
	case nir_intrinsic_shared_atomic_umin:
	case nir_intrinsic_shared_atomic_imax:
	case nir_intrinsic_shared_atomic_umax:
	case nir_intrinsic_shared_atomic_and:
	case nir_intrinsic_shared_atomic_or:
	case nir_intrinsic_shared_atomic_xor:
	case nir_intrinsic_shared_atomic_exchange:
	case nir_intrinsic_shared_atomic_comp_swap:
		dst[0] = emit_intrinsic_atomic_shared(ctx, intr);
		break;
	case nir_intrinsic_image_deref_load:
		emit_intrinsic_load_image(ctx, intr, dst);
		break;
	case nir_intrinsic_image_deref_store:
		if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
				!ctx->s->info.fs.early_fragment_tests)
			ctx->so->no_earlyz = true;
		ctx->funcs->emit_intrinsic_store_image(ctx, intr);
		break;
	case nir_intrinsic_image_deref_size:
		emit_intrinsic_image_size(ctx, intr, dst);
		break;
	case nir_intrinsic_image_deref_atomic_add:
	case nir_intrinsic_image_deref_atomic_imin:
	case nir_intrinsic_image_deref_atomic_umin:
	case nir_intrinsic_image_deref_atomic_imax:
	case nir_intrinsic_image_deref_atomic_umax:
	case nir_intrinsic_image_deref_atomic_and:
	case nir_intrinsic_image_deref_atomic_or:
	case nir_intrinsic_image_deref_atomic_xor:
	case nir_intrinsic_image_deref_atomic_exchange:
	case nir_intrinsic_image_deref_atomic_comp_swap:
		if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
				!ctx->s->info.fs.early_fragment_tests)
			ctx->so->no_earlyz = true;
		dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
		break;
	case nir_intrinsic_barrier:
	case nir_intrinsic_memory_barrier:
	case nir_intrinsic_group_memory_barrier:
	case nir_intrinsic_memory_barrier_atomic_counter:
	case nir_intrinsic_memory_barrier_buffer:
	case nir_intrinsic_memory_barrier_image:
	case nir_intrinsic_memory_barrier_shared:
		emit_intrinsic_barrier(ctx, intr);
		/* note that the block ptr is no longer valid, make that obvious: */
		b = NULL;
		break;
	case nir_intrinsic_store_output:
		idx = nir_intrinsic_base(intr);
		comp = nir_intrinsic_component(intr);
		compile_assert(ctx, nir_src_is_const(intr->src[1]));
		idx += nir_src_as_uint(intr->src[1]);

		src = ir3_get_src(ctx, &intr->src[0]);
		for (int i = 0; i < intr->num_components; i++) {
			unsigned n = idx * 4 + i + comp;
			ctx->ir->outputs[n] = src[i];
		}
		break;
	case nir_intrinsic_load_base_vertex:
	case nir_intrinsic_load_first_vertex:
		if (!ctx->basevertex) {
			ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE);
			add_sysval_input(ctx, SYSTEM_VALUE_FIRST_VERTEX, ctx->basevertex);
		}
		dst[0] = ctx->basevertex;
		break;
	case nir_intrinsic_load_vertex_id_zero_base:
	case nir_intrinsic_load_vertex_id:
		if (!ctx->vertex_id) {
			gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ?
				SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
			ctx->vertex_id = create_input(ctx, 0);
			add_sysval_input(ctx, sv, ctx->vertex_id);
		}
		dst[0] = ctx->vertex_id;
		break;
	case nir_intrinsic_load_instance_id:
		if (!ctx->instance_id) {
			ctx->instance_id = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID,
					ctx->instance_id);
		}
		dst[0] = ctx->instance_id;
		break;
	case nir_intrinsic_load_sample_id:
		ctx->so->per_samp = true;
		/* fall-thru */
	case nir_intrinsic_load_sample_id_no_per_sample:
		if (!ctx->samp_id) {
			ctx->samp_id = create_input(ctx, 0);
			ctx->samp_id->regs[0]->flags |= IR3_REG_HALF;
			add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID,
					ctx->samp_id);
		}
		dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32);
		break;
	case nir_intrinsic_load_sample_mask_in:
		if (!ctx->samp_mask_in) {
			ctx->samp_mask_in = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN,
					ctx->samp_mask_in);
		}
		dst[0] = ctx->samp_mask_in;
		break;
	case nir_intrinsic_load_user_clip_plane:
		idx = nir_intrinsic_ucp_id(intr);
		for (int i = 0; i < intr->num_components; i++) {
			unsigned n = idx * 4 + i;
			dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
		}
		break;
	case nir_intrinsic_load_front_face:
		if (!ctx->frag_face) {
			ctx->so->frag_face = true;
			ctx->frag_face = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, ctx->frag_face);
			ctx->frag_face->regs[0]->flags |= IR3_REG_HALF;
		}
		/* for fragface, we get -1 for back and 0 for front. However this is
		 * the inverse of what nir expects (where ~0 is true).
		 */
		dst[0] = ir3_COV(b, ctx->frag_face, TYPE_S16, TYPE_S32);
		dst[0] = ir3_NOT_B(b, dst[0], 0);
		break;
	case nir_intrinsic_load_local_invocation_id:
		if (!ctx->local_invocation_id) {
			ctx->local_invocation_id = create_input_compmask(ctx, 0, 0x7);
			add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID,
					0x7, ctx->local_invocation_id);
		}
		ir3_split_dest(b, dst, ctx->local_invocation_id, 0, 3);
		break;
	case nir_intrinsic_load_work_group_id:
		if (!ctx->work_group_id) {
			ctx->work_group_id = create_input_compmask(ctx, 0, 0x7);
			add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID,
					0x7, ctx->work_group_id);
			ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH;
		}
		ir3_split_dest(b, dst, ctx->work_group_id, 0, 3);
		break;
	case nir_intrinsic_load_num_work_groups:
		for (int i = 0; i < intr->num_components; i++) {
			dst[i] = create_driver_param(ctx, IR3_DP_NUM_WORK_GROUPS_X + i);
		}
		break;
	case nir_intrinsic_load_local_group_size:
		for (int i = 0; i < intr->num_components; i++) {
			dst[i] = create_driver_param(ctx, IR3_DP_LOCAL_GROUP_SIZE_X + i);
		}
		break;
	case nir_intrinsic_discard_if:
	case nir_intrinsic_discard: {
		struct ir3_instruction *cond, *kill;

		if (intr->intrinsic == nir_intrinsic_discard_if) {
			/* conditional discard: */
			src = ir3_get_src(ctx, &intr->src[0]);
			cond = ir3_b2n(b, src[0]);
		} else {
			/* unconditional discard: */
			cond = create_immed(b, 1);
		}

		/* NOTE: only cmps.*.* can write p0.x: */
		cond = ir3_CMPS_S(b, cond, 0, create_immed(b, 0), 0);
		cond->cat2.condition = IR3_COND_NE;

		/* condition always goes in predicate register: */
		cond->regs[0]->num = regid(REG_P0, 0);

		kill = ir3_KILL(b, cond, 0);
		array_insert(ctx->ir, ctx->ir->predicates, kill);

		array_insert(b, b->keeps, kill);
		ctx->so->no_earlyz = true;

		break;
	}
	default:
		ir3_context_error(ctx, "Unhandled intrinsic type: %s\n",
				nir_intrinsic_infos[intr->intrinsic].name);
		break;
	}

	if (info->has_dest)
		ir3_put_dst(ctx, &intr->dest);
}

static void
emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
{
	struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &instr->def,
			instr->def.num_components);

	if (instr->def.bit_size < 32) {
		for (int i = 0; i < instr->def.num_components; i++)
			dst[i] = create_immed_typed(ctx->block,
					instr->value[i].u16,
					TYPE_U16);
	} else {
		for (int i = 0; i < instr->def.num_components; i++)
			dst[i] = create_immed_typed(ctx->block,
					instr->value[i].u32,
					TYPE_U32);
	}
}

static void
emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
{
	struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &undef->def,
			undef->def.num_components);
	type_t type = (undef->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;

	/* backend doesn't want undefined instructions, so just plug
	 * in 0.0..
	 */
	for (int i = 0; i < undef->def.num_components; i++)
		dst[i] = create_immed_typed(ctx->block, fui(0.0), type);
}

/*
 * texture fetch/sample instructions:
 */

static void
tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
{
	unsigned coords, flags = 0;

	/* note: would use tex->coord_components.. except txs.. also,
	 * since array index goes after shadow ref, we don't want to
	 * count it:
	 */
	switch (tex->sampler_dim) {
	case GLSL_SAMPLER_DIM_1D:
	case GLSL_SAMPLER_DIM_BUF:
		coords = 1;
		break;
	case GLSL_SAMPLER_DIM_2D:
	case GLSL_SAMPLER_DIM_RECT:
	case GLSL_SAMPLER_DIM_EXTERNAL:
	case GLSL_SAMPLER_DIM_MS:
		coords = 2;
		break;
	case GLSL_SAMPLER_DIM_3D:
	case GLSL_SAMPLER_DIM_CUBE:
		coords = 3;
		flags |= IR3_INSTR_3D;
		break;
	default:
		unreachable("bad sampler_dim");
	}

	if (tex->is_shadow && tex->op != nir_texop_lod)
		flags |= IR3_INSTR_S;

	if (tex->is_array && tex->op != nir_texop_lod)
		flags |= IR3_INSTR_A;

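	/* E.g. (illustrative): a sampler2DArrayShadow lookup reports
	 * coords == 2 with IR3_INSTR_S and IR3_INSTR_A set; the shadow ref
	 * and array index are appended as extra sources rather than being
	 * counted as coords.
	 */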
1659 *flagsp = flags;
1660 *coordsp = coords;
1661 }
1662
1663 /* Gets the sampler/texture idx as a hvec2. Which could either be dynamic
1664 * or immediate (in which case it will get lowered later to a non .s2en
1665 * version of the tex instruction which encode tex/samp as immediates:
1666 */
1667 static struct ir3_instruction *
1668 get_tex_samp_tex_src(struct ir3_context *ctx, nir_tex_instr *tex)
1669 {
1670 int texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset);
1671 int sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset);
1672 struct ir3_instruction *texture, *sampler;
1673
1674 if (texture_idx >= 0) {
1675 texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0];
1676 texture = ir3_COV(ctx->block, texture, TYPE_U32, TYPE_U16);
1677 } else {
1678 /* TODO what to do for dynamic case? I guess we only need the
1679 * max index for astc srgb workaround so maybe not a problem
1680 * to worry about if we don't enable indirect samplers for
1681 * a4xx?
1682 */
1683 ctx->max_texture_index = MAX2(ctx->max_texture_index, tex->texture_index);
1684 texture = create_immed_typed(ctx->block, tex->texture_index, TYPE_U16);
1685 }
1686
1687 if (sampler_idx >= 0) {
1688 sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0];
1689 sampler = ir3_COV(ctx->block, sampler, TYPE_U32, TYPE_U16);
1690 } else {
1691 sampler = create_immed_typed(ctx->block, tex->sampler_index, TYPE_U16);
1692 }
1693
1694 return ir3_create_collect(ctx, (struct ir3_instruction*[]){
1695 sampler,
1696 texture,
1697 }, 2);
1698 }
1699
1700 static void
1701 emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
1702 {
1703 struct ir3_block *b = ctx->block;
1704 struct ir3_instruction **dst, *sam, *src0[12], *src1[4];
1705 struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
1706 struct ir3_instruction *lod, *compare, *proj, *sample_index;
1707 bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
1708 unsigned i, coords, flags, ncomp;
1709 unsigned nsrc0 = 0, nsrc1 = 0;
1710 type_t type;
1711 opc_t opc = 0;
1712
1713 ncomp = nir_dest_num_components(tex->dest);
1714
1715 coord = off = ddx = ddy = NULL;
1716 lod = proj = compare = sample_index = NULL;
1717
1718 dst = ir3_get_dst(ctx, &tex->dest, ncomp);
1719
1720 for (unsigned i = 0; i < tex->num_srcs; i++) {
1721 switch (tex->src[i].src_type) {
1722 case nir_tex_src_coord:
1723 coord = ir3_get_src(ctx, &tex->src[i].src);
1724 break;
1725 case nir_tex_src_bias:
1726 lod = ir3_get_src(ctx, &tex->src[i].src)[0];
1727 has_bias = true;
1728 break;
1729 case nir_tex_src_lod:
1730 lod = ir3_get_src(ctx, &tex->src[i].src)[0];
1731 has_lod = true;
1732 break;
1733 case nir_tex_src_comparator: /* shadow comparator */
1734 compare = ir3_get_src(ctx, &tex->src[i].src)[0];
1735 break;
1736 case nir_tex_src_projector:
1737 proj = ir3_get_src(ctx, &tex->src[i].src)[0];
1738 has_proj = true;
1739 break;
1740 case nir_tex_src_offset:
1741 off = ir3_get_src(ctx, &tex->src[i].src);
1742 has_off = true;
1743 break;
1744 case nir_tex_src_ddx:
1745 ddx = ir3_get_src(ctx, &tex->src[i].src);
1746 break;
1747 case nir_tex_src_ddy:
1748 ddy = ir3_get_src(ctx, &tex->src[i].src);
1749 break;
1750 case nir_tex_src_ms_index:
1751 sample_index = ir3_get_src(ctx, &tex->src[i].src)[0];
1752 break;
1753 case nir_tex_src_texture_offset:
1754 case nir_tex_src_sampler_offset:
1755 /* handled in get_tex_samp_src() */
1756 break;
1757 default:
1758 ir3_context_error(ctx, "Unhandled NIR tex src type: %d\n",
1759 tex->src[i].src_type);
1760 return;
1761 }
1762 }
1763
1764 switch (tex->op) {
1765 case nir_texop_tex: opc = has_lod ? OPC_SAML : OPC_SAM; break;
1766 case nir_texop_txb: opc = OPC_SAMB; break;
1767 case nir_texop_txl: opc = OPC_SAML; break;
1768 case nir_texop_txd: opc = OPC_SAMGQ; break;
1769 case nir_texop_txf: opc = OPC_ISAML; break;
1770 case nir_texop_lod: opc = OPC_GETLOD; break;
1771 case nir_texop_tg4:
1772 /* NOTE: a4xx might need to emulate gather w/ txf (this is
1773 * what the blob does; gather seems to be broken?), and a3xx
1774 * did not support it (but could probably also emulate it).
1775 */
1776 switch (tex->component) {
1777 case 0: opc = OPC_GATHER4R; break;
1778 case 1: opc = OPC_GATHER4G; break;
1779 case 2: opc = OPC_GATHER4B; break;
1780 case 3: opc = OPC_GATHER4A; break;
1781 }
1782 break;
1783 case nir_texop_txf_ms_fb:
1784 case nir_texop_txf_ms: opc = OPC_ISAMM; break;
1785 default:
1786 ir3_context_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
1787 return;
1788 }
1789
1790 tex_info(tex, &flags, &coords);
1791
1792 /*
1793 * lay out the first argument in the proper order:
1794 * - actual coordinates first
1795 * - shadow reference
1796 * - array index
1797 * - projection w
1798 * - starting at offset 4, dpdx.xy, dpdy.xy
1799 *
1800 * bias/lod go into the second arg
1801 */
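/* e.g. (illustrative) for a txd on a shadow 2D texture, the first
 * argument would end up laid out as:
 *   src0 = { x, y, compare, 0.0, dpdx.x, dpdx.y, dpdy.x, dpdy.y }
 * (the pad out to offset 4 only happens for txd)
 */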
1802
1803 /* insert tex coords: */
1804 for (i = 0; i < coords; i++)
1805 src0[i] = coord[i];
1806
1807 nsrc0 = i;
1808
1809 /* scale up integer coords for TXF based on the LOD */
1810 if (ctx->compiler->unminify_coords && (opc == OPC_ISAML)) {
1811 assert(has_lod);
1812 for (i = 0; i < coords; i++)
1813 src0[i] = ir3_SHL_B(b, src0[i], 0, lod, 0);
1814 }
1815
1816 if (coords == 1) {
1817 /* hw doesn't do 1d, so we treat it as 2d with
1818 * height of 1, and patch up the y coord.
1819 */
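/* (0.5 addresses the center of the single row in normalized
 * coordinates, while isam takes integer texel coords, hence 0)
 */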
1820 if (is_isam(opc)) {
1821 src0[nsrc0++] = create_immed(b, 0);
1822 } else {
1823 src0[nsrc0++] = create_immed(b, fui(0.5));
1824 }
1825 }
1826
1827 if (tex->is_shadow && tex->op != nir_texop_lod)
1828 src0[nsrc0++] = compare;
1829
1830 if (tex->is_array && tex->op != nir_texop_lod) {
1831 struct ir3_instruction *idx = coord[coords];
1832
1833 /* the array coord for cube arrays needs 0.5 added to it */
1834 if (ctx->compiler->array_index_add_half && !is_isam(opc))
1835 idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);
1836
1837 src0[nsrc0++] = idx;
1838 }
1839
1840 if (has_proj) {
1841 src0[nsrc0++] = proj;
1842 flags |= IR3_INSTR_P;
1843 }
1844
1845 /* pad to 4, then ddx/ddy: */
1846 if (tex->op == nir_texop_txd) {
1847 while (nsrc0 < 4)
1848 src0[nsrc0++] = create_immed(b, fui(0.0));
1849 for (i = 0; i < coords; i++)
1850 src0[nsrc0++] = ddx[i];
1851 if (coords < 2)
1852 src0[nsrc0++] = create_immed(b, fui(0.0));
1853 for (i = 0; i < coords; i++)
1854 src0[nsrc0++] = ddy[i];
1855 if (coords < 2)
1856 src0[nsrc0++] = create_immed(b, fui(0.0));
1857 }
1858
1859 /* NOTE: a3xx (and possibly a4xx?) might be different, using isaml
1860 * with the x coord scaled according to the requested sample:
1861 */
1862 if (opc == OPC_ISAMM) {
1863 if (ctx->compiler->txf_ms_with_isaml) {
1864 /* the samples are laid out in x dimension as
1865 * 0 1 2 3
1866 * x_ms = (x << ms) + sample_index;
1867 */
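/* ctx->samples packs log2(nr_samples) in 2 bits per texture, so
 * e.g. (illustrative) for a 4x MSAA texture ms=2, and fetching
 * sample 3 of texel x=5 gives x_ms = (5 << 2) + 3 = 23
 */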
1868 struct ir3_instruction *ms;
1869 ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);
1870
1871 src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
1872 src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);
1873
1874 opc = OPC_ISAML;
1875 } else {
1876 src0[nsrc0++] = sample_index;
1877 }
1878 }
1879
1880 /*
1881 * second argument (if applicable):
1882 * - offsets
1883 * - lod
1884 * - bias
1885 */
1886 if (has_off | has_lod | has_bias) {
1887 if (has_off) {
1888 unsigned off_coords = coords;
1889 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
1890 off_coords--;
1891 for (i = 0; i < off_coords; i++)
1892 src1[nsrc1++] = off[i];
1893 if (off_coords < 2)
1894 src1[nsrc1++] = create_immed(b, fui(0.0));
1895 flags |= IR3_INSTR_O;
1896 }
1897
1898 if (has_lod | has_bias)
1899 src1[nsrc1++] = lod;
1900 }
1901
1902 switch (tex->dest_type) {
1903 case nir_type_invalid:
1904 case nir_type_float:
1905 type = TYPE_F32;
1906 break;
1907 case nir_type_int:
1908 type = TYPE_S32;
1909 break;
1910 case nir_type_uint:
1911 case nir_type_bool:
1912 type = TYPE_U32;
1913 break;
1914 default:
1915 unreachable("bad dest_type");
1916 }
1917
1918 if (opc == OPC_GETLOD)
1919 type = TYPE_U32;
1920
1921 struct ir3_instruction *samp_tex;
1922
1923 if (tex->op == nir_texop_txf_ms_fb) {
1924 /* only expect a single txf_ms_fb per shader: */
1925 compile_assert(ctx, !ctx->so->fb_read);
1926 compile_assert(ctx, ctx->so->type == MESA_SHADER_FRAGMENT);
1927
1928 ctx->so->fb_read = true;
1929 samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
1930 create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
1931 create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
1932 }, 2);
1933
1934 ctx->so->num_samp++;
1935 } else {
1936 samp_tex = get_tex_samp_tex_src(ctx, tex);
1937 }
1938
1939 struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0);
1940 struct ir3_instruction *col1 = ir3_create_collect(ctx, src1, nsrc1);
1941
1942 sam = ir3_SAM(b, opc, type, MASK(ncomp), flags,
1943 samp_tex, col0, col1);
1944
1945 if ((ctx->astc_srgb & (1 << tex->texture_index)) && !nir_tex_instr_is_query(tex)) {
1946 /* only need first 3 components: */
1947 sam->regs[0]->wrmask = 0x7;
1948 ir3_split_dest(b, dst, sam, 0, 3);
1949
1950 /* we need to sample the alpha separately with a non-ASTC
1951 * texture state:
1952 */
1953 sam = ir3_SAM(b, opc, type, 0b1000, flags,
1954 samp_tex, col0, col1);
1955
1956 array_insert(ctx->ir, ctx->ir->astc_srgb, sam);
1957
1958 /* fixup .w component: */
1959 ir3_split_dest(b, &dst[3], sam, 3, 1);
1960 } else {
1961 /* normal (non-workaround) case: */
1962 ir3_split_dest(b, dst, sam, 0, ncomp);
1963 }
1964
1965 /* GETLOD returns results in 4.8 fixed point */
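/* e.g. (illustrative) a raw getlod result of 0x180 (= 384)
 * becomes 384 * (1.0 / 256) = 1.5
 */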
1966 if (opc == OPC_GETLOD) {
1967 struct ir3_instruction *factor = create_immed(b, fui(1.0 / 256));
1968
1969 compile_assert(ctx, tex->dest_type == nir_type_float);
1970 for (i = 0; i < 2; i++) {
1971 dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_U32, TYPE_F32), 0,
1972 factor, 0);
1973 }
1974 }
1975
1976 ir3_put_dst(ctx, &tex->dest);
1977 }
1978
1979 static void
1980 emit_tex_query_levels(struct ir3_context *ctx, nir_tex_instr *tex)
1981 {
1982 struct ir3_block *b = ctx->block;
1983 struct ir3_instruction **dst, *sam;
1984
1985 dst = ir3_get_dst(ctx, &tex->dest, 1);
1986
1987 sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 0b0100, 0,
1988 get_tex_samp_tex_src(ctx, tex), NULL, NULL);
1989
1990 /* even though there is only one component, it ends up
1991 * in .z rather than .x, so we need a split_dest()
1992 */
1993 ir3_split_dest(b, dst, sam, 0, 3);
1994
1995 /* The # of levels comes from getinfo.z. We need to add 1 to it, since
1996 * the value in TEX_CONST_0 is zero-based.
1997 */
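/* e.g. (illustrative) a 1024x1024 texture with a full mip chain
 * has 11 levels, but TEX_CONST_0 stores 10
 */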
1998 if (ctx->compiler->levels_add_one)
1999 dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);
2000
2001 ir3_put_dst(ctx, &tex->dest);
2002 }
2003
2004 static void
2005 emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
2006 {
2007 struct ir3_block *b = ctx->block;
2008 struct ir3_instruction **dst, *sam;
2009 struct ir3_instruction *lod;
2010 unsigned flags, coords;
2011
2012 tex_info(tex, &flags, &coords);
2013
2014 /* Actually we want the number of dimensions, not coordinates. This
2015 * distinction only matters for cubes.
2016 */
2017 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
2018 coords = 2;
2019
2020 dst = ir3_get_dst(ctx, &tex->dest, 4);
2021
2022 compile_assert(ctx, tex->num_srcs == 1);
2023 compile_assert(ctx, tex->src[0].src_type == nir_tex_src_lod);
2024
2025 lod = ir3_get_src(ctx, &tex->src[0].src)[0];
2026
2027 sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
2028 get_tex_samp_tex_src(ctx, tex), lod, NULL);
2029
2030 ir3_split_dest(b, dst, sam, 0, 4);
2031
2032 /* Array size actually ends up in .w rather than .z. This doesn't
2033 * matter for miplevel 0, but for higher mips the value in z is
2034 * minified whereas w stays. Also, the raw (zero-based) value in
2035 * TEX_CONST_3_DEPTH is returned, which means we have to add 1 to it for arrays.
2036 */
2037 if (tex->is_array) {
2038 if (ctx->compiler->levels_add_one) {
2039 dst[coords] = ir3_ADD_U(b, dst[3], 0, create_immed(b, 1), 0);
2040 } else {
2041 dst[coords] = ir3_MOV(b, dst[3], TYPE_U32);
2042 }
2043 }
2044
2045 ir3_put_dst(ctx, &tex->dest);
2046 }
2047
2048 static void
2049 emit_jump(struct ir3_context *ctx, nir_jump_instr *jump)
2050 {
2051 switch (jump->type) {
2052 case nir_jump_break:
2053 case nir_jump_continue:
2054 case nir_jump_return:
2055 /* I *think* we can simply ignore this, and use the
2056 * successor block link to figure out where we need to
2057 * jump to for break/continue
2058 */
2059 break;
2060 default:
2061 ir3_context_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
2062 break;
2063 }
2064 }
2065
2066 static void
2067 emit_instr(struct ir3_context *ctx, nir_instr *instr)
2068 {
2069 switch (instr->type) {
2070 case nir_instr_type_alu:
2071 emit_alu(ctx, nir_instr_as_alu(instr));
2072 break;
2073 case nir_instr_type_deref:
2074 /* ignored, handled as part of the intrinsic they are src to */
2075 break;
2076 case nir_instr_type_intrinsic:
2077 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2078 break;
2079 case nir_instr_type_load_const:
2080 emit_load_const(ctx, nir_instr_as_load_const(instr));
2081 break;
2082 case nir_instr_type_ssa_undef:
2083 emit_undef(ctx, nir_instr_as_ssa_undef(instr));
2084 break;
2085 case nir_instr_type_tex: {
2086 nir_tex_instr *tex = nir_instr_as_tex(instr);
2087 /* a couple of tex instructions get special-cased:
2088 */
2089 switch (tex->op) {
2090 case nir_texop_txs:
2091 emit_tex_txs(ctx, tex);
2092 break;
2093 case nir_texop_query_levels:
2094 emit_tex_query_levels(ctx, tex);
2095 break;
2096 default:
2097 emit_tex(ctx, tex);
2098 break;
2099 }
2100 break;
2101 }
2102 case nir_instr_type_jump:
2103 emit_jump(ctx, nir_instr_as_jump(instr));
2104 break;
2105 case nir_instr_type_phi:
2106 /* we have converted phi webs to regs in NIR by now */
2107 ir3_context_error(ctx, "Unexpected NIR instruction type: %d\n", instr->type);
2108 break;
2109 case nir_instr_type_call:
2110 case nir_instr_type_parallel_copy:
2111 ir3_context_error(ctx, "Unhandled NIR instruction type: %d\n", instr->type);
2112 break;
2113 }
2114 }
2115
2116 static struct ir3_block *
2117 get_block(struct ir3_context *ctx, const nir_block *nblock)
2118 {
2119 struct ir3_block *block;
2120 struct hash_entry *hentry;
2121
2122 hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
2123 if (hentry)
2124 return hentry->data;
2125
2126 block = ir3_block_create(ctx->ir);
2127 block->nblock = nblock;
2128 _mesa_hash_table_insert(ctx->block_ht, nblock, block);
2129
2130 block->predecessors = _mesa_pointer_set_create(block);
2131 set_foreach(nblock->predecessors, sentry) {
2132 _mesa_set_add(block->predecessors, get_block(ctx, sentry->key));
2133 }
2134
2135 return block;
2136 }
2137
2138 static void
2139 emit_block(struct ir3_context *ctx, nir_block *nblock)
2140 {
2141 struct ir3_block *block = get_block(ctx, nblock);
2142
2143 for (int i = 0; i < ARRAY_SIZE(block->successors); i++) {
2144 if (nblock->successors[i]) {
2145 block->successors[i] =
2146 get_block(ctx, nblock->successors[i]);
2147 }
2148 }
2149
2150 ctx->block = block;
2151 list_addtail(&block->node, &ctx->ir->block_list);
2152
2153 /* re-emit addr register in each block if needed: */
2154 for (int i = 0; i < ARRAY_SIZE(ctx->addr_ht); i++) {
2155 _mesa_hash_table_destroy(ctx->addr_ht[i], NULL);
2156 ctx->addr_ht[i] = NULL;
2157 }
2158
2159 nir_foreach_instr(instr, nblock) {
2160 ctx->cur_instr = instr;
2161 emit_instr(ctx, instr);
2162 ctx->cur_instr = NULL;
2163 if (ctx->error)
2164 return;
2165 }
2166 }
2167
2168 static void emit_cf_list(struct ir3_context *ctx, struct exec_list *list);
2169
2170 static void
2171 emit_if(struct ir3_context *ctx, nir_if *nif)
2172 {
2173 struct ir3_instruction *condition = ir3_get_src(ctx, &nif->condition)[0];
2174
2175 ctx->block->condition =
2176 ir3_get_predicate(ctx, ir3_b2n(condition->block, condition));
2177
2178 emit_cf_list(ctx, &nif->then_list);
2179 emit_cf_list(ctx, &nif->else_list);
2180 }
2181
2182 static void
2183 emit_loop(struct ir3_context *ctx, nir_loop *nloop)
2184 {
2185 emit_cf_list(ctx, &nloop->body);
2186 ctx->so->loops++;
2187 }
2188
2189 static void
2190 stack_push(struct ir3_context *ctx)
2191 {
2192 ctx->stack++;
2193 ctx->max_stack = MAX2(ctx->max_stack, ctx->stack);
2194 }
2195
2196 static void
2197 stack_pop(struct ir3_context *ctx)
2198 {
2199 compile_assert(ctx, ctx->stack > 0);
2200 ctx->stack--;
2201 }
2202
2203 static void
2204 emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
2205 {
2206 foreach_list_typed(nir_cf_node, node, node, list) {
2207 switch (node->type) {
2208 case nir_cf_node_block:
2209 emit_block(ctx, nir_cf_node_as_block(node));
2210 break;
2211 case nir_cf_node_if:
2212 stack_push(ctx);
2213 emit_if(ctx, nir_cf_node_as_if(node));
2214 stack_pop(ctx);
2215 break;
2216 case nir_cf_node_loop:
2217 stack_push(ctx);
2218 emit_loop(ctx, nir_cf_node_as_loop(node));
2219 stack_pop(ctx);
2220 break;
2221 case nir_cf_node_function:
2222 ir3_context_error(ctx, "TODO\n");
2223 break;
2224 }
2225 }
2226 }
2227
2228 /* emit stream-out code. At this point, the current block is the original
2229 * (nir) end block, and nir ensures that all flow control paths terminate
2230 * into the end block. We re-purpose the original end block to generate
2231 * the 'if (vtxcnt < maxvtxcnt)' condition, then append the conditional
2232 * block holding stream-out write instructions, followed by the new end
2233 * block:
2234 *
2235 * blockOrigEnd {
2236 * p0.x = (vtxcnt < maxvtxcnt)
2237 * // succs: blockStreamOut, blockNewEnd
2238 * }
2239 * blockStreamOut {
2240 * ... stream-out instructions ...
2241 * // succs: blockNewEnd
2242 * }
2243 * blockNewEnd {
2244 * }
2245 */
2246 static void
2247 emit_stream_out(struct ir3_context *ctx)
2248 {
2249 struct ir3 *ir = ctx->ir;
2250 struct ir3_stream_output_info *strmout =
2251 &ctx->so->shader->stream_output;
2252 struct ir3_block *orig_end_block, *stream_out_block, *new_end_block;
2253 struct ir3_instruction *vtxcnt, *maxvtxcnt, *cond;
2254 struct ir3_instruction *bases[IR3_MAX_SO_BUFFERS];
2255
2256 /* create vtxcnt input in input block at top of shader,
2257 * so that it is seen as live over the entire duration
2258 * of the shader:
2259 */
2260 vtxcnt = create_input(ctx, 0);
2261 add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt);
2262
2263 maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX);
2264
2265 /* at this point, we are at the original 'end' block,
2266 * re-purpose this block to stream-out condition, then
2267 * append stream-out block and new-end block
2268 */
2269 orig_end_block = ctx->block;
2270
2271 // TODO these blocks need to update predecessors..
2272 // maybe w/ store_global intrinsic, we could do this
2273 // stuff in nir->nir pass
2274
2275 stream_out_block = ir3_block_create(ir);
2276 list_addtail(&stream_out_block->node, &ir->block_list);
2277
2278 new_end_block = ir3_block_create(ir);
2279 list_addtail(&new_end_block->node, &ir->block_list);
2280
2281 orig_end_block->successors[0] = stream_out_block;
2282 orig_end_block->successors[1] = new_end_block;
2283 stream_out_block->successors[0] = new_end_block;
2284
2285 /* setup 'if (vtxcnt < maxvtxcnt)' condition: */
2286 cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0);
2287 cond->regs[0]->num = regid(REG_P0, 0);
2288 cond->cat2.condition = IR3_COND_LT;
2289
2290 /* the condition goes on the block preceding the conditional,
2291 * since it is used to pick which of the two successor
2292 * paths to take:
2293 */
2294 orig_end_block->condition = cond;
2295
2296 /* switch to stream_out_block to generate the stream-out
2297 * instructions:
2298 */
2299 ctx->block = stream_out_block;
2300
2301 /* Calculate base addresses based on vtxcnt. Instructions
2302 * generated for bases not used in following loop will be
2303 * stripped out in the backend.
2304 */
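/* e.g. (illustrative) with a stride of 4 dwords, vertex N of
 * buffer i ends up storing at byte offset tfbo[i] + N * 16
 */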
2305 for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
2306 struct ir3_const_state *const_state = &ctx->so->shader->const_state;
2307 unsigned stride = strmout->stride[i];
2308 struct ir3_instruction *base, *off;
2309
2310 base = create_uniform(ctx->block, regid(const_state->offsets.tfbo, i));
2311
2312 /* 24-bit should be enough: */
2313 off = ir3_MUL_U(ctx->block, vtxcnt, 0,
2314 create_immed(ctx->block, stride * 4), 0);
2315
2316 bases[i] = ir3_ADD_S(ctx->block, off, 0, base, 0);
2317 }
2318
2319 /* Generate the per-output store instructions: */
2320 for (unsigned i = 0; i < strmout->num_outputs; i++) {
2321 for (unsigned j = 0; j < strmout->output[i].num_components; j++) {
2322 unsigned c = j + strmout->output[i].start_component;
2323 struct ir3_instruction *base, *out, *stg;
2324
2325 base = bases[strmout->output[i].output_buffer];
2326 out = ctx->ir->outputs[regid(strmout->output[i].register_index, c)];
2327
2328 stg = ir3_STG(ctx->block, base, 0, out, 0,
2329 create_immed(ctx->block, 1), 0);
2330 stg->cat6.type = TYPE_U32;
2331 stg->cat6.dst_offset = (strmout->output[i].dst_offset + j) * 4;
2332
2333 array_insert(ctx->block, ctx->block->keeps, stg);
2334 }
2335 }
2336
2337 /* and finally switch to the new_end_block: */
2338 ctx->block = new_end_block;
2339 }
2340
2341 static void
2342 emit_function(struct ir3_context *ctx, nir_function_impl *impl)
2343 {
2344 nir_metadata_require(impl, nir_metadata_block_index);
2345
2346 compile_assert(ctx, ctx->stack == 0);
2347
2348 emit_cf_list(ctx, &impl->body);
2349 emit_block(ctx, impl->end_block);
2350
2351 compile_assert(ctx, ctx->stack == 0);
2352
2353 /* at this point, we should have a single empty block,
2354 * into which we emit the 'end' instruction.
2355 */
2356 compile_assert(ctx, list_empty(&ctx->block->instr_list));
2357
2358 /* If stream-out (aka transform-feedback) enabled, emit the
2359 * stream-out instructions, followed by a new empty block (into
2360 * which the 'end' instruction lands).
2361 *
2362 * NOTE: it is done in this order, rather than inserting before
2363 * we emit end_block, because NIR guarantees that all blocks
2364 * flow into end_block, and that end_block has no successors.
2365 * So by re-purposing end_block as the first block of stream-
2366 * out, we guarantee that all exit paths flow into the stream-
2367 * out instructions.
2368 */
2369 if ((ctx->compiler->gpu_id < 500) &&
2370 (ctx->so->shader->stream_output.num_outputs > 0) &&
2371 !ctx->so->binning_pass) {
2372 debug_assert(ctx->so->type == MESA_SHADER_VERTEX);
2373 emit_stream_out(ctx);
2374 }
2375
2376 ir3_END(ctx->block);
2377 }
2378
2379 static void
2380 setup_input(struct ir3_context *ctx, nir_variable *in)
2381 {
2382 struct ir3_shader_variant *so = ctx->so;
2383 unsigned ncomp = glsl_get_components(in->type);
2384 unsigned n = in->data.driver_location;
2385 unsigned frac = in->data.location_frac;
2386 unsigned slot = in->data.location;
2387
2388 /* skip unread inputs: we could end up with (for example) unsplit
2389 * matrix/etc inputs in the case they are not read, so just silently
2390 * skip these.
2391 */
2392 if (ncomp > 4)
2393 return;
2394
2395 so->inputs[n].slot = slot;
2396 so->inputs[n].compmask = (1 << (ncomp + frac)) - 1;
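/* note the compmask counts from component 0, so e.g. (illustrative)
 * a vec2 at location_frac=1 gives compmask 0x7 (.xyz) */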
2397 so->inputs_count = MAX2(so->inputs_count, n + 1);
2398 so->inputs[n].interpolate = in->data.interpolation;
2399 so->inputs[n].ncomp = ncomp;
2400
2401 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
2402
2403 /* if any varyings have the 'sample' qualifier, that triggers us
2404 * to run in per-sample mode:
2405 */
2406 so->per_samp |= in->data.sample;
2407
2408 for (int i = 0; i < ncomp; i++) {
2409 struct ir3_instruction *instr = NULL;
2410 unsigned idx = (n * 4) + i + frac;
2411
2412 if (slot == VARYING_SLOT_POS) {
2413 ir3_context_error(ctx, "fragcoord should be a sysval!\n");
2414 } else if (slot == VARYING_SLOT_PNTC) {
2415 /* see for example st_nir_fixup_varying_slots().. this is
2416 * maybe a bit mesa/st specific. But we need things to line
2417 * up for this in fdN_program:
2418 * unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0);
2419 * if (emit->sprite_coord_enable & texmask) {
2420 * ...
2421 * }
2422 */
2423 so->inputs[n].slot = VARYING_SLOT_VAR8;
2424 so->inputs[n].bary = true;
2425 instr = create_frag_input(ctx, false, idx);
2426 } else {
2427 /* detect the special case for front/back colors where
2428 * we need to do flat vs smooth shading depending on
2429 * rast state:
2430 */
2431 if (in->data.interpolation == INTERP_MODE_NONE) {
2432 switch (slot) {
2433 case VARYING_SLOT_COL0:
2434 case VARYING_SLOT_COL1:
2435 case VARYING_SLOT_BFC0:
2436 case VARYING_SLOT_BFC1:
2437 so->inputs[n].rasterflat = true;
2438 break;
2439 default:
2440 break;
2441 }
2442 }
2443
2444 if (ctx->compiler->flat_bypass) {
2445 if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) ||
2446 (so->inputs[n].rasterflat && ctx->so->key.rasterflat))
2447 so->inputs[n].use_ldlv = true;
2448 }
2449
2450 so->inputs[n].bary = true;
2451
2452 instr = create_frag_input(ctx, so->inputs[n].use_ldlv, idx);
2453 }
2454
2455 compile_assert(ctx, idx < ctx->ir->ninputs);
2456
2457 ctx->ir->inputs[idx] = instr;
2458 }
2459 } else if (ctx->so->type == MESA_SHADER_VERTEX) {
2460 for (int i = 0; i < ncomp; i++) {
2461 unsigned idx = (n * 4) + i + frac;
2462 compile_assert(ctx, idx < ctx->ir->ninputs);
2463 ctx->ir->inputs[idx] = create_input(ctx, idx);
2464 }
2465 } else {
2466 ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
2467 }
2468
2469 if (so->inputs[n].bary || (ctx->so->type == MESA_SHADER_VERTEX)) {
2470 so->total_in += ncomp;
2471 }
2472 }
2473
2474 /* Initially we assign non-packed inloc's for varyings, as we don't really
2475 * know up-front which components will be unused. After all the compilation
2476 * stages we scan the shader to see which components are actually used, and
2477 * re-pack the inlocs to eliminate unneeded varyings.
2478 */
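/* For example (illustrative): if only .xy of the first varying and
 * only .x of the second survive, the first is packed to inlocs 0-1
 * and the second to inloc 2.
 */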
2479 static void
2480 pack_inlocs(struct ir3_context *ctx)
2481 {
2482 struct ir3_shader_variant *so = ctx->so;
2483 uint8_t used_components[so->inputs_count];
2484
2485 memset(used_components, 0, sizeof(used_components));
2486
2487 /*
2488 * First Step: scan shader to find which bary.f/ldlv remain:
2489 */
2490
2491 list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
2492 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
2493 if (is_input(instr)) {
2494 unsigned inloc = instr->regs[1]->iim_val;
2495 unsigned i = inloc / 4;
2496 unsigned j = inloc % 4;
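/* e.g. inloc 6 is component .z (j=2) of the second vec4 input (i=1) */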
2497
2498 compile_assert(ctx, instr->regs[1]->flags & IR3_REG_IMMED);
2499 compile_assert(ctx, i < so->inputs_count);
2500
2501 used_components[i] |= 1 << j;
2502 }
2503 }
2504 }
2505
2506 /*
2507 * Second Step: reassign varying inloc/slots:
2508 */
2509
2510 unsigned actual_in = 0;
2511 unsigned inloc = 0;
2512
2513 for (unsigned i = 0; i < so->inputs_count; i++) {
2514 unsigned compmask = 0, maxcomp = 0;
2515
2516 so->inputs[i].ncomp = 0;
2517 so->inputs[i].inloc = inloc;
2518 so->inputs[i].bary = false;
2519
2520 for (unsigned j = 0; j < 4; j++) {
2521 if (!(used_components[i] & (1 << j)))
2522 continue;
2523
2524 compmask |= (1 << j);
2525 actual_in++;
2526 so->inputs[i].ncomp++;
2527 maxcomp = j + 1;
2528
2529 /* at this point, since the used_components[i] mask only
2530 * considers varyings (ie. not sysvals), we know this
2531 * is a varying:
2532 */
2533 so->inputs[i].bary = true;
2534 }
2535
2536 if (so->inputs[i].bary) {
2537 so->varying_in++;
2538 so->inputs[i].compmask = (1 << maxcomp) - 1;
2539 inloc += maxcomp;
2540 }
2541 }
2542
2543 /*
2544 * Third Step: reassign packed inloc's:
2545 */
2546
2547 list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
2548 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
2549 if (is_input(instr)) {
2550 unsigned inloc = instr->regs[1]->iim_val;
2551 unsigned i = inloc / 4;
2552 unsigned j = inloc % 4;
2553
2554 instr->regs[1]->iim_val = so->inputs[i].inloc + j;
2555 }
2556 }
2557 }
2558 }
2559
2560 static void
2561 setup_output(struct ir3_context *ctx, nir_variable *out)
2562 {
2563 struct ir3_shader_variant *so = ctx->so;
2564 unsigned ncomp = glsl_get_components(out->type);
2565 unsigned n = out->data.driver_location;
2566 unsigned frac = out->data.location_frac;
2567 unsigned slot = out->data.location;
2568 unsigned comp = 0;
2569
2570 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
2571 switch (slot) {
2572 case FRAG_RESULT_DEPTH:
2573 comp = 2; /* tgsi will write to .z component */
2574 so->writes_pos = true;
2575 break;
2576 case FRAG_RESULT_COLOR:
2577 so->color0_mrt = 1;
2578 break;
2579 case FRAG_RESULT_SAMPLE_MASK:
2580 so->writes_smask = true;
2581 break;
2582 default:
2583 if (slot >= FRAG_RESULT_DATA0)
2584 break;
2585 ir3_context_error(ctx, "unknown FS output name: %s\n",
2586 gl_frag_result_name(slot));
2587 }
2588 } else if (ctx->so->type == MESA_SHADER_VERTEX) {
2589 switch (slot) {
2590 case VARYING_SLOT_POS:
2591 so->writes_pos = true;
2592 break;
2593 case VARYING_SLOT_PSIZ:
2594 so->writes_psize = true;
2595 break;
2596 case VARYING_SLOT_COL0:
2597 case VARYING_SLOT_COL1:
2598 case VARYING_SLOT_BFC0:
2599 case VARYING_SLOT_BFC1:
2600 case VARYING_SLOT_FOGC:
2601 case VARYING_SLOT_CLIP_DIST0:
2602 case VARYING_SLOT_CLIP_DIST1:
2603 case VARYING_SLOT_CLIP_VERTEX:
2604 break;
2605 default:
2606 if (slot >= VARYING_SLOT_VAR0)
2607 break;
2608 if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
2609 break;
2610 ir3_context_error(ctx, "unknown VS output name: %s\n",
2611 gl_varying_slot_name(slot));
2612 }
2613 } else {
2614 ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
2615 }
2616
2617 compile_assert(ctx, n < ARRAY_SIZE(so->outputs));
2618
2619 so->outputs[n].slot = slot;
2620 so->outputs[n].regid = regid(n, comp);
2621 so->outputs_count = MAX2(so->outputs_count, n + 1);
2622
2623 for (int i = 0; i < ncomp; i++) {
2624 unsigned idx = (n * 4) + i + frac;
2625 compile_assert(ctx, idx < ctx->ir->noutputs);
2626 ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
2627 }
2628
2629 /* if varying packing doesn't happen, we could end up in a situation
2630 * with "holes" in the output, and since the per-generation code that
2631 * sets up varying linkage registers doesn't expect to have more than
2632 * one varying per vec4 slot, pad the holes.
2633 *
2634 * Note that this should probably generate a performance warning of
2635 * some sort.
2636 */
2637 for (int i = 0; i < frac; i++) {
2638 unsigned idx = (n * 4) + i;
2639 if (!ctx->ir->outputs[idx]) {
2640 ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
2641 }
2642 }
2643 }
2644
2645 static int
2646 max_drvloc(struct exec_list *vars)
2647 {
2648 int drvloc = -1;
2649 nir_foreach_variable(var, vars) {
2650 drvloc = MAX2(drvloc, (int)var->data.driver_location);
2651 }
2652 return drvloc;
2653 }
2654
2655 static const unsigned max_sysvals[] = {
2656 [MESA_SHADER_FRAGMENT] = 24, // TODO
2657 [MESA_SHADER_VERTEX] = 16,
2658 [MESA_SHADER_COMPUTE] = 16, // TODO how many do we actually need?
2659 [MESA_SHADER_KERNEL] = 16, // TODO how many do we actually need?
2660 };
2661
2662 static void
2663 emit_instructions(struct ir3_context *ctx)
2664 {
2665 unsigned ninputs, noutputs;
2666 nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);
2667
2668 ninputs = (max_drvloc(&ctx->s->inputs) + 1) * 4;
2669 noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4;
2670
2671 /* we need to leave room for sysvals:
2672 */
2673 ninputs += max_sysvals[ctx->so->type];
2674
2675 ctx->ir = ir3_create(ctx->compiler, ctx->so->type, ninputs, noutputs);
2676
2677 /* Create inputs in first block: */
2678 ctx->block = get_block(ctx, nir_start_block(fxn));
2679 ctx->in_block = ctx->block;
2680 list_addtail(&ctx->block->node, &ctx->ir->block_list);
2681
2682 ninputs -= max_sysvals[ctx->so->type];
2683
2684 /* for fragment shader, the vcoord input register is used as the
2685 * base for bary.f varying fetch instrs:
2686 *
2687 * TODO defer creating ctx->ij_pixel and corresponding sysvals
2688 * until emit_intrinsic when we know they are actually needed.
2689 * For now, we defer creating ctx->ij_centroid, etc, since we
2690 * only need ij_pixel for "old style" varying inputs (ie.
2691 * tgsi_to_nir)
2692 */
2693 struct ir3_instruction *vcoord = NULL;
2694 if (ctx->so->type == MESA_SHADER_FRAGMENT) {
2695 struct ir3_instruction *xy[2];
2696
2697 vcoord = create_input_compmask(ctx, 0, 0x3);
2698 ir3_split_dest(ctx->block, xy, vcoord, 0, 2);
2699
2700 ctx->ij_pixel = ir3_create_collect(ctx, xy, 2);
2701 }
2702
2703 /* Setup inputs: */
2704 nir_foreach_variable(var, &ctx->s->inputs) {
2705 setup_input(ctx, var);
2706 }
2707
2708 /* Defer add_sysval_input() stuff until after setup_inputs(),
2709 * because sysvals need to be appended after varyings:
2710 */
2711 if (vcoord) {
2712 add_sysval_input_compmask(ctx, SYSTEM_VALUE_BARYCENTRIC_PIXEL,
2713 0x3, vcoord);
2714 }
2715
2716 /* Setup outputs: */
2717 nir_foreach_variable(var, &ctx->s->outputs) {
2718 setup_output(ctx, var);
2719 }
2720
2721 /* Find # of samplers: */
2722 nir_foreach_variable(var, &ctx->s->uniforms) {
2723 ctx->so->num_samp += glsl_type_get_sampler_count(var->type);
2724 /* just assume that we'll be reading from images.. if it
2725 * is write-only we don't have to count it, but not sure
2726 * if there is a good way to know?
2727 */
2728 ctx->so->num_samp += glsl_type_get_image_count(var->type);
2729 }
2730
2731 /* NOTE: need to do something more clever when we support >1 fxn */
2732 nir_foreach_register(reg, &fxn->registers) {
2733 ir3_declare_array(ctx, reg);
2734 }
2735 /* And emit the body: */
2736 ctx->impl = fxn;
2737 emit_function(ctx, fxn);
2738 }
2739
2740 /* from the NIR perspective, we actually have varying inputs. But the varying
2741 * inputs, from an IR standpoint, are just bary.f/ldlv instructions. The
2742 * only actual inputs are the sysvals.
2743 */
2744 static void
2745 fixup_frag_inputs(struct ir3_context *ctx)
2746 {
2747 struct ir3_shader_variant *so = ctx->so;
2748 struct ir3 *ir = ctx->ir;
2749 unsigned i = 0;
2750
2751 /* sysvals should appear at the end of the inputs, drop everything else: */
2752 while ((i < so->inputs_count) && !so->inputs[i].sysval)
2753 i++;
2754
2755 /* at IR level, inputs are always blocks of 4 scalars: */
2756 i *= 4;
2757
2758 ir->inputs = &ir->inputs[i];
2759 ir->ninputs -= i;
2760 }
2761
2762 /* Fixup tex sampler state for astc/srgb workaround instructions. We
2763 * need to assign the tex state indexes for these after we know the
2764 * max tex index.
2765 */
2766 static void
2767 fixup_astc_srgb(struct ir3_context *ctx)
2768 {
2769 struct ir3_shader_variant *so = ctx->so;
2770 /* indexed by original tex idx, value is newly assigned alpha sampler
2771 * state tex idx. Zero is invalid since there is at least one sampler
2772 * if we get here.
2773 */
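/* e.g. (illustrative) if max_texture_index is 7 and textures 2 and 5
 * need the workaround, they get alpha tex state slots 8 and 9, in
 * the order their sam instructions are encountered
 */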
2774 unsigned alt_tex_state[16] = {0};
2775 unsigned tex_idx = ctx->max_texture_index + 1;
2776 unsigned idx = 0;
2777
2778 so->astc_srgb.base = tex_idx;
2779
2780 for (unsigned i = 0; i < ctx->ir->astc_srgb_count; i++) {
2781 struct ir3_instruction *sam = ctx->ir->astc_srgb[i];
2782
2783 compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));
2784
2785 if (alt_tex_state[sam->cat5.tex] == 0) {
2786 /* assign new alternate/alpha tex state slot: */
2787 alt_tex_state[sam->cat5.tex] = tex_idx++;
2788 so->astc_srgb.orig_idx[idx++] = sam->cat5.tex;
2789 so->astc_srgb.count++;
2790 }
2791
2792 sam->cat5.tex = alt_tex_state[sam->cat5.tex];
2793 }
2794 }
2795
2796 static void
2797 fixup_binning_pass(struct ir3_context *ctx)
2798 {
2799 struct ir3_shader_variant *so = ctx->so;
2800 struct ir3 *ir = ctx->ir;
2801 unsigned i, j;
2802
2803 for (i = 0, j = 0; i < so->outputs_count; i++) {
2804 unsigned slot = so->outputs[i].slot;
2805
2806 /* throw away everything but first position/psize */
2807 if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
2808 if (i != j) {
2809 so->outputs[j] = so->outputs[i];
2810 ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0];
2811 ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1];
2812 ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2];
2813 ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3];
2814 }
2815 j++;
2816 }
2817 }
2818 so->outputs_count = j;
2819 ir->noutputs = j * 4;
2820 }
2821
2822 int
2823 ir3_compile_shader_nir(struct ir3_compiler *compiler,
2824 struct ir3_shader_variant *so)
2825 {
2826 struct ir3_context *ctx;
2827 struct ir3 *ir;
2828 struct ir3_instruction **inputs;
2829 unsigned i;
2830 int ret = 0, max_bary;
2831
2832 assert(!so->ir);
2833
2834 ctx = ir3_context_init(compiler, so);
2835 if (!ctx) {
2836 DBG("INIT failed!");
2837 ret = -1;
2838 goto out;
2839 }
2840
2841 emit_instructions(ctx);
2842
2843 if (ctx->error) {
2844 DBG("EMIT failed!");
2845 ret = -1;
2846 goto out;
2847 }
2848
2849 ir = so->ir = ctx->ir;
2850
2851 /* keep track of the inputs from the TGSI perspective.. */
2852 inputs = ir->inputs;
2853
2854 /* but fixup actual inputs for frag shader: */
2855 if (so->type == MESA_SHADER_FRAGMENT)
2856 fixup_frag_inputs(ctx);
2857
2858 /* at this point, for binning pass, throw away unneeded outputs: */
2859 if (so->binning_pass && (ctx->compiler->gpu_id < 600))
2860 fixup_binning_pass(ctx);
2861
2862 /* if we want half-precision outputs, mark the output registers
2863 * as half:
2864 */
2865 if (so->key.half_precision) {
2866 for (i = 0; i < ir->noutputs; i++) {
2867 struct ir3_instruction *out = ir->outputs[i];
2868
2869 if (!out)
2870 continue;
2871
2872 /* if frag shader writes z, that needs to be full precision: */
2873 if (so->outputs[i/4].slot == FRAG_RESULT_DEPTH)
2874 continue;
2875
2876 out->regs[0]->flags |= IR3_REG_HALF;
2877 /* output could be a fanout (ie. texture fetch output)
2878 * in which case we need to propagate the half-reg flag
2879 * up to the definer so that RA sees it:
2880 */
2881 if (out->opc == OPC_META_FO) {
2882 out = out->regs[1]->instr;
2883 out->regs[0]->flags |= IR3_REG_HALF;
2884 }
2885
2886 if (out->opc == OPC_MOV) {
2887 out->cat1.dst_type = half_type(out->cat1.dst_type);
2888 }
2889 }
2890 }
2891
2892 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
2893 printf("BEFORE CP:\n");
2894 ir3_print(ir);
2895 }
2896
2897 ir3_cp(ir, so);
2898
2899 /* at this point, for binning pass, throw away unneeded outputs:
2900 * Note that for a6xx and later, we do this after ir3_cp to ensure
2901 * that the uniform/constant layout for BS and VS matches, so that
2902 * we can re-use the same VS_CONST state group.
2903 */
2904 if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
2905 fixup_binning_pass(ctx);
2906
2907 /* for a6xx+, binning and draw pass VS use the same VBO state, so we
2908 * need to make sure not to remove any inputs that are used by
2909 * the nonbinning VS.
2910 */
2911 if (ctx->compiler->gpu_id >= 600 && so->binning_pass) {
2912 debug_assert(so->type == MESA_SHADER_VERTEX);
2913 for (int i = 0; i < ir->ninputs; i++) {
2914 struct ir3_instruction *in = ir->inputs[i];
2915
2916 if (!in)
2917 continue;
2918
2919 unsigned n = i / 4;
2920 unsigned c = i % 4;
2921
2922 debug_assert(n < so->nonbinning->inputs_count);
2923
2924 if (so->nonbinning->inputs[n].sysval)
2925 continue;
2926
2927 /* be sure to keep inputs, even if only used in VS */
2928 if (so->nonbinning->inputs[n].compmask & (1 << c))
2929 array_insert(in->block, in->block->keeps, in);
2930 }
2931 }
2932
2933 /* Insert a mov if the same instruction is used for more than one output,
2934 * eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
2935 */
2936 for (int i = ir->noutputs - 1; i >= 0; i--) {
2937 if (!ir->outputs[i])
2938 continue;
2939 for (unsigned j = 0; j < i; j++) {
2940 if (ir->outputs[i] == ir->outputs[j]) {
2941 ir->outputs[i] =
2942 ir3_MOV(ir->outputs[i]->block, ir->outputs[i], TYPE_F32);
2943 }
2944 }
2945 }
2946
2947 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
2948 printf("BEFORE GROUPING:\n");
2949 ir3_print(ir);
2950 }
2951
2952 ir3_sched_add_deps(ir);
2953
2954 /* Group left/right neighbors, inserting mov's where needed to
2955 * solve conflicts:
2956 */
2957 ir3_group(ir);
2958
2959 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
2960 printf("AFTER GROUPING:\n");
2961 ir3_print(ir);
2962 }
2963
2964 ir3_depth(ir);
2965
2966 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
2967 printf("AFTER DEPTH:\n");
2968 ir3_print(ir);
2969 }
2970
2971 /* do Sethi–Ullman numbering before scheduling: */
2972 ir3_sun(ir);
2973
2974 ret = ir3_sched(ir);
2975 if (ret) {
2976 DBG("SCHED failed!");
2977 goto out;
2978 }
2979
2980 if (compiler->gpu_id >= 600) {
2981 ir3_a6xx_fixup_atomic_dests(ir, so);
2982 }
2983
2984 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
2985 printf("AFTER SCHED:\n");
2986 ir3_print(ir);
2987 }
2988
2989 ret = ir3_ra(so);
2990 if (ret) {
2991 DBG("RA failed!");
2992 goto out;
2993 }
2994
2995 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
2996 printf("AFTER RA:\n");
2997 ir3_print(ir);
2998 }
2999
3000 if (so->type == MESA_SHADER_FRAGMENT)
3001 pack_inlocs(ctx);
3002
3003 /* fixup input/outputs: */
3004 for (i = 0; i < so->outputs_count; i++) {
3005 /* sometimes we get outputs that don't write the .x coord, like:
3006 *
3007 * decl_var shader_out INTERP_MODE_NONE float Color (VARYING_SLOT_VAR9.z, 1, 0)
3008 *
3009 * Presumably the result of varying packing and then eliminating
3010 * some unneeded varyings? Just skip ahead to the first valid
3011 * component of the output.
3012 */
3013 for (unsigned j = 0; j < 4; j++) {
3014 struct ir3_instruction *instr = ir->outputs[(i*4) + j];
3015 if (instr) {
3016 so->outputs[i].regid = instr->regs[0]->num;
3017 so->outputs[i].half = !!(instr->regs[0]->flags & IR3_REG_HALF);
3018 break;
3019 }
3020 }
3021 }
3022
3023 /* Note that some or all channels of an input may be unused: */
3024 for (i = 0; i < so->inputs_count; i++) {
3025 unsigned j, reg = regid(63,0);
3026 bool half = false;
3027 for (j = 0; j < 4; j++) {
3028 struct ir3_instruction *in = inputs[(i*4) + j];
3029
3030 if (!in)
3031 continue;
3032
3033 if (in->flags & IR3_INSTR_UNUSED)
3034 continue;
3035
3036 reg = in->regs[0]->num - j;
3037 if (half) {
3038 compile_assert(ctx, in->regs[0]->flags & IR3_REG_HALF);
3039 } else {
3040 half = !!(in->regs[0]->flags & IR3_REG_HALF);
3041 }
3042 }
3043 so->inputs[i].regid = reg;
3044 so->inputs[i].half = half;
3045 }
3046
3047 if (ctx->astc_srgb)
3048 fixup_astc_srgb(ctx);
3049
3050 /* We need to do legalize after the "bary.f" offsets (inloc) have
3051 * been assigned (for frag shaders).
3052 */
3053 ir3_legalize(ir, &so->has_ssbo, &so->need_pixlod, &max_bary);
3054
3055 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
3056 printf("AFTER LEGALIZE:\n");
3057 ir3_print(ir);
3058 }
3059
3060 so->branchstack = ctx->max_stack;
3061
3062 /* Note that actual_in counts inputs that are not bary.f'd for FS: */
3063 if (so->type == MESA_SHADER_FRAGMENT)
3064 so->total_in = max_bary + 1;
3065
3066 so->max_sun = ir->max_sun;
3067
3068 out:
3069 if (ret) {
3070 if (so->ir)
3071 ir3_destroy(so->ir);
3072 so->ir = NULL;
3073 }
3074 ir3_context_free(ctx);
3075
3076 return ret;
3077 }