freedreno/ir3: Make imageStore use num components from image format
[mesa.git] / src / freedreno / ir3 / ir3_compiler_nir.c
1 /*
2 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include <stdarg.h>
28
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
32
33 #include "ir3_compiler.h"
34 #include "ir3_shader.h"
35 #include "ir3_nir.h"
36
37 #include "instr-a3xx.h"
38 #include "ir3.h"
39 #include "ir3_context.h"
40
41
42 static struct ir3_instruction *
43 create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
44 struct ir3_instruction *address, struct ir3_instruction *collect)
45 {
46 struct ir3_block *block = ctx->block;
47 struct ir3_instruction *mov;
48 struct ir3_register *src;
49
50 mov = ir3_instr_create(block, OPC_MOV);
51 mov->cat1.src_type = TYPE_U32;
52 mov->cat1.dst_type = TYPE_U32;
53 ir3_reg_create(mov, 0, 0);
54 src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV);
55 src->instr = collect;
56 src->size = arrsz;
57 src->array.offset = n;
58
59 ir3_instr_set_address(mov, address);
60
61 return mov;
62 }
63
64 static struct ir3_instruction *
65 create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask)
66 {
67 struct ir3_instruction *in;
68
69 in = ir3_instr_create(ctx->in_block, OPC_META_INPUT);
70 in->inout.block = ctx->in_block;
71 ir3_reg_create(in, n, 0);
72
73 in->regs[0]->wrmask = compmask;
74
75 return in;
76 }
77
78 static struct ir3_instruction *
79 create_input(struct ir3_context *ctx, unsigned n)
80 {
81 return create_input_compmask(ctx, n, 0x1);
82 }
83
84 static struct ir3_instruction *
85 create_frag_input(struct ir3_context *ctx, bool use_ldlv)
86 {
87 struct ir3_block *block = ctx->block;
88 struct ir3_instruction *instr;
89 /* actual inloc is assigned and fixed up later: */
90 struct ir3_instruction *inloc = create_immed(block, 0);
91
92 if (use_ldlv) {
93 instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
94 instr->cat6.type = TYPE_U32;
95 instr->cat6.iim_val = 1;
96 } else {
97 instr = ir3_BARY_F(block, inloc, 0, ctx->frag_vcoord, 0);
98 instr->regs[2]->wrmask = 0x3;
99 }
100
101 return instr;
102 }
103
104 static struct ir3_instruction *
105 create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
106 {
107 /* first four vec4 sysval's reserved for UBOs: */
108 /* NOTE: dp is in scalar, but there can be >4 dp components: */
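	/* e.g. (illustrative): dp = 6 selects component .z of the second
	 * driver-param vec4, since regid(n + 6/4, 6%4) == regid(n + 1, 2).
	 */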
109 unsigned n = ctx->so->constbase.driver_param;
110 unsigned r = regid(n + dp / 4, dp % 4);
111 return create_uniform(ctx->block, r);
112 }
113
114 /*
 115  * Adreno uses uint rather than having a dedicated bool type,
 116  * which (potentially) requires some conversion, in particular
 117  * when using the output of a bool instr as an int input, or
 118  * vice versa.
119 *
120 * | Adreno | NIR |
121 * -------+---------+-------+-
122 * true | 1 | ~0 |
123 * false | 0 | 0 |
124 *
125 * To convert from an adreno bool (uint) to nir, use:
126 *
127 * absneg.s dst, (neg)src
128 *
129 * To convert back in the other direction:
130 *
 131  *   absneg.s dst, (abs)src
132 *
133 * The CP step can clean up the absneg.s that cancel each other
134 * out, and with a slight bit of extra cleverness (to recognize
135 * the instructions which produce either a 0 or 1) can eliminate
136 * the absneg.s's completely when an instruction that wants
137 * 0/1 consumes the result. For example, when a nir 'bcsel'
138 * consumes the result of 'feq'. So we should be able to get by
 139  * without a boolean resolve step, and without incurring any
140 * extra penalty in instruction count.
141 */
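/* Worked example of the convention above (illustrative only):
 * NIR true is ~0 (i.e. -1), so ir3_b2n emits absneg.s dst, (abs)-1,
 * giving 1 (native true); converting back, ir3_n2b emits
 * absneg.s dst, (neg)1, giving -1 == ~0 (NIR true) again.
 */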
142
143 /* NIR bool -> native (adreno): */
144 static struct ir3_instruction *
145 ir3_b2n(struct ir3_block *block, struct ir3_instruction *instr)
146 {
147 return ir3_ABSNEG_S(block, instr, IR3_REG_SABS);
148 }
149
150 /* native (adreno) -> NIR bool: */
151 static struct ir3_instruction *
152 ir3_n2b(struct ir3_block *block, struct ir3_instruction *instr)
153 {
154 return ir3_ABSNEG_S(block, instr, IR3_REG_SNEG);
155 }
156
157 /*
158 * alu/sfu instructions:
159 */
160
161 static struct ir3_instruction *
162 create_cov(struct ir3_context *ctx, struct ir3_instruction *src,
163 unsigned src_bitsize, nir_op op)
164 {
165 type_t src_type, dst_type;
166
167 switch (op) {
168 case nir_op_f2f32:
169 case nir_op_f2f16_rtne:
170 case nir_op_f2f16_rtz:
171 case nir_op_f2f16:
172 case nir_op_f2i32:
173 case nir_op_f2i16:
174 case nir_op_f2i8:
175 case nir_op_f2u32:
176 case nir_op_f2u16:
177 case nir_op_f2u8:
178 switch (src_bitsize) {
179 case 32:
180 src_type = TYPE_F32;
181 break;
182 case 16:
183 src_type = TYPE_F16;
184 break;
185 default:
186 ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
187 }
188 break;
189
190 case nir_op_i2f32:
191 case nir_op_i2f16:
192 case nir_op_i2i32:
193 case nir_op_i2i16:
194 case nir_op_i2i8:
195 switch (src_bitsize) {
196 case 32:
197 src_type = TYPE_S32;
198 break;
199 case 16:
200 src_type = TYPE_S16;
201 break;
202 case 8:
203 src_type = TYPE_S8;
204 break;
205 default:
206 ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
207 }
208 break;
209
210 case nir_op_u2f32:
211 case nir_op_u2f16:
212 case nir_op_u2u32:
213 case nir_op_u2u16:
214 case nir_op_u2u8:
215 switch (src_bitsize) {
216 case 32:
217 src_type = TYPE_U32;
218 break;
219 case 16:
220 src_type = TYPE_U16;
221 break;
222 case 8:
223 src_type = TYPE_U8;
224 break;
225 default:
226 ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
227 }
228 break;
229
230 default:
231 ir3_context_error(ctx, "invalid conversion op: %u", op);
232 }
233
234 switch (op) {
235 case nir_op_f2f32:
236 case nir_op_i2f32:
237 case nir_op_u2f32:
238 dst_type = TYPE_F32;
239 break;
240
241 case nir_op_f2f16_rtne:
242 case nir_op_f2f16_rtz:
243 case nir_op_f2f16:
244 /* TODO how to handle rounding mode? */
245 case nir_op_i2f16:
246 case nir_op_u2f16:
247 dst_type = TYPE_F16;
248 break;
249
250 case nir_op_f2i32:
251 case nir_op_i2i32:
252 dst_type = TYPE_S32;
253 break;
254
255 case nir_op_f2i16:
256 case nir_op_i2i16:
257 dst_type = TYPE_S16;
258 break;
259
260 case nir_op_f2i8:
261 case nir_op_i2i8:
262 dst_type = TYPE_S8;
263 break;
264
265 case nir_op_f2u32:
266 case nir_op_u2u32:
267 dst_type = TYPE_U32;
268 break;
269
270 case nir_op_f2u16:
271 case nir_op_u2u16:
272 dst_type = TYPE_U16;
273 break;
274
275 case nir_op_f2u8:
276 case nir_op_u2u8:
277 dst_type = TYPE_U8;
278 break;
279
280 default:
281 ir3_context_error(ctx, "invalid conversion op: %u", op);
282 }
283
284 return ir3_COV(ctx->block, src, src_type, dst_type);
285 }
286
287 static void
288 emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
289 {
290 const nir_op_info *info = &nir_op_infos[alu->op];
291 struct ir3_instruction **dst, *src[info->num_inputs];
292 unsigned bs[info->num_inputs]; /* bit size */
293 struct ir3_block *b = ctx->block;
294 unsigned dst_sz, wrmask;
295
296 if (alu->dest.dest.is_ssa) {
297 dst_sz = alu->dest.dest.ssa.num_components;
298 wrmask = (1 << dst_sz) - 1;
299 } else {
300 dst_sz = alu->dest.dest.reg.reg->num_components;
301 wrmask = alu->dest.write_mask;
302 }
303
304 dst = ir3_get_dst(ctx, &alu->dest.dest, dst_sz);
305
306 /* Vectors are special in that they have non-scalarized writemasks,
307 * and just take the first swizzle channel for each argument in
308 * order into each writemask channel.
309 */
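	/* For example (illustrative): vec3(a.y, b.x, c.z) becomes three
	 * scalar movs:
	 *   dst.x = mov a.y
	 *   dst.y = mov b.x
	 *   dst.z = mov c.z
	 */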
310 if ((alu->op == nir_op_vec2) ||
311 (alu->op == nir_op_vec3) ||
312 (alu->op == nir_op_vec4)) {
313
314 for (int i = 0; i < info->num_inputs; i++) {
315 nir_alu_src *asrc = &alu->src[i];
316
317 compile_assert(ctx, !asrc->abs);
318 compile_assert(ctx, !asrc->negate);
319
320 src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[0]];
321 if (!src[i])
322 src[i] = create_immed(ctx->block, 0);
323 dst[i] = ir3_MOV(b, src[i], TYPE_U32);
324 }
325
326 put_dst(ctx, &alu->dest.dest);
327 return;
328 }
329
 330 /* We also get mov's with more than one component, so handle
 331 * those specially:
 332 */
333 if ((alu->op == nir_op_imov) || (alu->op == nir_op_fmov)) {
334 type_t type = (alu->op == nir_op_imov) ? TYPE_U32 : TYPE_F32;
335 nir_alu_src *asrc = &alu->src[0];
336 struct ir3_instruction *const *src0 = ir3_get_src(ctx, &asrc->src);
337
338 for (unsigned i = 0; i < dst_sz; i++) {
339 if (wrmask & (1 << i)) {
340 dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], type);
341 } else {
342 dst[i] = NULL;
343 }
344 }
345
346 put_dst(ctx, &alu->dest.dest);
347 return;
348 }
349
350 /* General case: We can just grab the one used channel per src. */
351 for (int i = 0; i < info->num_inputs; i++) {
352 unsigned chan = ffs(alu->dest.write_mask) - 1;
353 nir_alu_src *asrc = &alu->src[i];
354
355 compile_assert(ctx, !asrc->abs);
356 compile_assert(ctx, !asrc->negate);
357
358 src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[chan]];
359 bs[i] = nir_src_bit_size(asrc->src);
360
361 compile_assert(ctx, src[i]);
362 }
363
364 switch (alu->op) {
365 case nir_op_f2f32:
366 case nir_op_f2f16_rtne:
367 case nir_op_f2f16_rtz:
368 case nir_op_f2f16:
369 case nir_op_f2i32:
370 case nir_op_f2i16:
371 case nir_op_f2i8:
372 case nir_op_f2u32:
373 case nir_op_f2u16:
374 case nir_op_f2u8:
375 case nir_op_i2f32:
376 case nir_op_i2f16:
377 case nir_op_i2i32:
378 case nir_op_i2i16:
379 case nir_op_i2i8:
380 case nir_op_u2f32:
381 case nir_op_u2f16:
382 case nir_op_u2u32:
383 case nir_op_u2u16:
384 case nir_op_u2u8:
385 dst[0] = create_cov(ctx, src[0], bs[0], alu->op);
386 break;
387 case nir_op_f2b32:
388 dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
389 dst[0]->cat2.condition = IR3_COND_NE;
390 dst[0] = ir3_n2b(b, dst[0]);
391 break;
392 case nir_op_b2f16:
393 case nir_op_b2f32:
394 dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
395 break;
396 case nir_op_b2i8:
397 case nir_op_b2i16:
398 case nir_op_b2i32:
399 dst[0] = ir3_b2n(b, src[0]);
400 break;
401 case nir_op_i2b32:
402 dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
403 dst[0]->cat2.condition = IR3_COND_NE;
404 dst[0] = ir3_n2b(b, dst[0]);
405 break;
406
407 case nir_op_fneg:
408 dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FNEG);
409 break;
410 case nir_op_fabs:
411 dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FABS);
412 break;
413 case nir_op_fmax:
414 dst[0] = ir3_MAX_F(b, src[0], 0, src[1], 0);
415 break;
416 case nir_op_fmin:
417 dst[0] = ir3_MIN_F(b, src[0], 0, src[1], 0);
418 break;
419 case nir_op_fsat:
 420 /* if there is just a single use of the src, and it supports
 421 * the (sat) bit, we can just fold the (sat) flag back to the
422 * src instruction and create a mov. This is easier for cp
423 * to eliminate.
424 *
425 * TODO probably opc_cat==4 is ok too
426 */
427 if (alu->src[0].src.is_ssa &&
428 (list_length(&alu->src[0].src.ssa->uses) == 1) &&
429 ((opc_cat(src[0]->opc) == 2) || (opc_cat(src[0]->opc) == 3))) {
430 src[0]->flags |= IR3_INSTR_SAT;
431 dst[0] = ir3_MOV(b, src[0], TYPE_U32);
432 } else {
433 /* otherwise generate a max.f that saturates.. blob does
434 * similar (generating a cat2 mov using max.f)
435 */
436 dst[0] = ir3_MAX_F(b, src[0], 0, src[0], 0);
437 dst[0]->flags |= IR3_INSTR_SAT;
438 }
439 break;
440 case nir_op_fmul:
441 dst[0] = ir3_MUL_F(b, src[0], 0, src[1], 0);
442 break;
443 case nir_op_fadd:
444 dst[0] = ir3_ADD_F(b, src[0], 0, src[1], 0);
445 break;
446 case nir_op_fsub:
447 dst[0] = ir3_ADD_F(b, src[0], 0, src[1], IR3_REG_FNEG);
448 break;
449 case nir_op_ffma:
450 dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0);
451 break;
452 case nir_op_fddx:
453 dst[0] = ir3_DSX(b, src[0], 0);
454 dst[0]->cat5.type = TYPE_F32;
455 break;
456 case nir_op_fddy:
457 dst[0] = ir3_DSY(b, src[0], 0);
458 dst[0]->cat5.type = TYPE_F32;
459 break;
461 case nir_op_flt32:
462 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
463 dst[0]->cat2.condition = IR3_COND_LT;
464 dst[0] = ir3_n2b(b, dst[0]);
465 break;
466 case nir_op_fge32:
467 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
468 dst[0]->cat2.condition = IR3_COND_GE;
469 dst[0] = ir3_n2b(b, dst[0]);
470 break;
471 case nir_op_feq32:
472 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
473 dst[0]->cat2.condition = IR3_COND_EQ;
474 dst[0] = ir3_n2b(b, dst[0]);
475 break;
476 case nir_op_fne32:
477 dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
478 dst[0]->cat2.condition = IR3_COND_NE;
479 dst[0] = ir3_n2b(b, dst[0]);
480 break;
481 case nir_op_fceil:
482 dst[0] = ir3_CEIL_F(b, src[0], 0);
483 break;
484 case nir_op_ffloor:
485 dst[0] = ir3_FLOOR_F(b, src[0], 0);
486 break;
487 case nir_op_ftrunc:
488 dst[0] = ir3_TRUNC_F(b, src[0], 0);
489 break;
490 case nir_op_fround_even:
491 dst[0] = ir3_RNDNE_F(b, src[0], 0);
492 break;
493 case nir_op_fsign:
494 dst[0] = ir3_SIGN_F(b, src[0], 0);
495 break;
496
497 case nir_op_fsin:
498 dst[0] = ir3_SIN(b, src[0], 0);
499 break;
500 case nir_op_fcos:
501 dst[0] = ir3_COS(b, src[0], 0);
502 break;
503 case nir_op_frsq:
504 dst[0] = ir3_RSQ(b, src[0], 0);
505 break;
506 case nir_op_frcp:
507 dst[0] = ir3_RCP(b, src[0], 0);
508 break;
509 case nir_op_flog2:
510 dst[0] = ir3_LOG2(b, src[0], 0);
511 break;
512 case nir_op_fexp2:
513 dst[0] = ir3_EXP2(b, src[0], 0);
514 break;
515 case nir_op_fsqrt:
516 dst[0] = ir3_SQRT(b, src[0], 0);
517 break;
518
519 case nir_op_iabs:
520 dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SABS);
521 break;
522 case nir_op_iadd:
523 dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
524 break;
525 case nir_op_iand:
526 dst[0] = ir3_AND_B(b, src[0], 0, src[1], 0);
527 break;
528 case nir_op_imax:
529 dst[0] = ir3_MAX_S(b, src[0], 0, src[1], 0);
530 break;
531 case nir_op_umax:
532 dst[0] = ir3_MAX_U(b, src[0], 0, src[1], 0);
533 break;
534 case nir_op_imin:
535 dst[0] = ir3_MIN_S(b, src[0], 0, src[1], 0);
536 break;
537 case nir_op_umin:
538 dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
539 break;
540 case nir_op_imul:
541 /*
542 * dst = (al * bl) + (ah * bl << 16) + (al * bh << 16)
543 * mull.u tmp0, a, b ; mul low, i.e. al * bl
544 * madsh.m16 tmp1, a, b, tmp0 ; mul-add shift high mix, i.e. ah * bl << 16
545 * madsh.m16 dst, b, a, tmp1 ; i.e. al * bh << 16
546 */
547 dst[0] = ir3_MADSH_M16(b, src[1], 0, src[0], 0,
548 ir3_MADSH_M16(b, src[0], 0, src[1], 0,
549 ir3_MULL_U(b, src[0], 0, src[1], 0), 0), 0);
550 break;
551 case nir_op_ineg:
552 dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
553 break;
554 case nir_op_inot:
555 dst[0] = ir3_NOT_B(b, src[0], 0);
556 break;
557 case nir_op_ior:
558 dst[0] = ir3_OR_B(b, src[0], 0, src[1], 0);
559 break;
560 case nir_op_ishl:
561 dst[0] = ir3_SHL_B(b, src[0], 0, src[1], 0);
562 break;
563 case nir_op_ishr:
564 dst[0] = ir3_ASHR_B(b, src[0], 0, src[1], 0);
565 break;
566 case nir_op_isign: {
567 /* maybe this would be sane to lower in nir.. */
568 struct ir3_instruction *neg, *pos;
569
570 neg = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
571 neg->cat2.condition = IR3_COND_LT;
572
573 pos = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
574 pos->cat2.condition = IR3_COND_GT;
575
576 dst[0] = ir3_SUB_U(b, pos, 0, neg, 0);
577
578 break;
579 }
580 case nir_op_isub:
581 dst[0] = ir3_SUB_U(b, src[0], 0, src[1], 0);
582 break;
583 case nir_op_ixor:
584 dst[0] = ir3_XOR_B(b, src[0], 0, src[1], 0);
585 break;
586 case nir_op_ushr:
587 dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0);
588 break;
589 case nir_op_ilt32:
590 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
591 dst[0]->cat2.condition = IR3_COND_LT;
592 dst[0] = ir3_n2b(b, dst[0]);
593 break;
594 case nir_op_ige32:
595 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
596 dst[0]->cat2.condition = IR3_COND_GE;
597 dst[0] = ir3_n2b(b, dst[0]);
598 break;
599 case nir_op_ieq32:
600 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
601 dst[0]->cat2.condition = IR3_COND_EQ;
602 dst[0] = ir3_n2b(b, dst[0]);
603 break;
604 case nir_op_ine32:
605 dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
606 dst[0]->cat2.condition = IR3_COND_NE;
607 dst[0] = ir3_n2b(b, dst[0]);
608 break;
609 case nir_op_ult32:
610 dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
611 dst[0]->cat2.condition = IR3_COND_LT;
612 dst[0] = ir3_n2b(b, dst[0]);
613 break;
614 case nir_op_uge32:
615 dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
616 dst[0]->cat2.condition = IR3_COND_GE;
617 dst[0] = ir3_n2b(b, dst[0]);
618 break;
619
620 case nir_op_b32csel: {
621 struct ir3_instruction *cond = ir3_b2n(b, src[0]);
622 compile_assert(ctx, bs[1] == bs[2]);
623 /* the boolean condition is 32b even if src[1] and src[2] are
624 * half-precision, but sel.b16 wants all three src's to be the
625 * same type.
626 */
627 if (bs[1] < 32)
628 cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16);
629 dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
630 break;
631 }
632 case nir_op_bit_count:
633 dst[0] = ir3_CBITS_B(b, src[0], 0);
634 break;
635 case nir_op_ifind_msb: {
636 struct ir3_instruction *cmp;
637 dst[0] = ir3_CLZ_S(b, src[0], 0);
638 cmp = ir3_CMPS_S(b, dst[0], 0, create_immed(b, 0), 0);
639 cmp->cat2.condition = IR3_COND_GE;
640 dst[0] = ir3_SEL_B32(b,
641 ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
642 cmp, 0, dst[0], 0);
643 break;
644 }
645 case nir_op_ufind_msb:
646 dst[0] = ir3_CLZ_B(b, src[0], 0);
647 dst[0] = ir3_SEL_B32(b,
648 ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0), 0,
649 src[0], 0, dst[0], 0);
650 break;
651 case nir_op_find_lsb:
652 dst[0] = ir3_BFREV_B(b, src[0], 0);
653 dst[0] = ir3_CLZ_B(b, dst[0], 0);
654 break;
655 case nir_op_bitfield_reverse:
656 dst[0] = ir3_BFREV_B(b, src[0], 0);
657 break;
658
659 default:
660 ir3_context_error(ctx, "Unhandled ALU op: %s\n",
661 nir_op_infos[alu->op].name);
662 break;
663 }
664
665 put_dst(ctx, &alu->dest.dest);
666 }
667
668 /* handles direct/indirect UBO reads: */
669 static void
670 emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
671 struct ir3_instruction **dst)
672 {
673 struct ir3_block *b = ctx->block;
674 struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
675 nir_const_value *const_offset;
676 /* UBO addresses are the first driver params: */
677 unsigned ubo = regid(ctx->so->constbase.ubo, 0);
678 const unsigned ptrsz = ir3_pointer_size(ctx);
679
680 int off = 0;
681
 682 /* First src is the ubo index, which may or may not be an immed: */
683 src0 = ir3_get_src(ctx, &intr->src[0])[0];
684 if (is_same_type_mov(src0) &&
685 (src0->regs[1]->flags & IR3_REG_IMMED)) {
686 base_lo = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz));
687 base_hi = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
688 } else {
689 base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, 4));
690 base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, 4));
691 }
692
693 /* note: on 32bit gpu's base_hi is ignored and DCE'd */
694 addr = base_lo;
695
696 const_offset = nir_src_as_const_value(intr->src[1]);
697 if (const_offset) {
698 off += const_offset->u32[0];
699 } else {
700 /* For load_ubo_indirect, second src is indirect offset: */
701 src1 = ir3_get_src(ctx, &intr->src[1])[0];
702
703 /* and add offset to addr: */
704 addr = ir3_ADD_S(b, addr, 0, src1, 0);
705 }
706
 707 /* if offset is too large to encode in the ldg, split it out: */
708 if ((off + (intr->num_components * 4)) > 1024) {
709 /* split out the minimal amount to improve the odds that
710 * cp can fit the immediate in the add.s instruction:
711 */
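		/* Worked example (illustrative): off = 1040 with 4 components
		 * gives off2 = 1040 + 16 - 1024 = 32; we fold 32 into an
		 * add.s and keep off = 1008, which still encodes in the ldg.
		 */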
712 unsigned off2 = off + (intr->num_components * 4) - 1024;
713 addr = ir3_ADD_S(b, addr, 0, create_immed(b, off2), 0);
714 off -= off2;
715 }
716
717 if (ptrsz == 2) {
718 struct ir3_instruction *carry;
719
720 /* handle 32b rollover, ie:
721 * if (addr < base_lo)
722 * base_hi++
723 */
724 carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
725 carry->cat2.condition = IR3_COND_LT;
726 base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);
727
728 addr = ir3_create_collect(ctx, (struct ir3_instruction*[]){ addr, base_hi }, 2);
729 }
730
731 for (int i = 0; i < intr->num_components; i++) {
732 struct ir3_instruction *load =
733 ir3_LDG(b, addr, 0, create_immed(b, 1), 0);
734 load->cat6.type = TYPE_U32;
735 load->cat6.src_offset = off + i * 4; /* byte offset */
736 dst[i] = load;
737 }
738 }
739
740 /* src[] = { buffer_index, offset }. No const_index */
741 static void
742 emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
743 struct ir3_instruction **dst)
744 {
745 struct ir3_block *b = ctx->block;
746 struct ir3_instruction *ldgb, *src0, *src1, *offset;
747 nir_const_value *const_offset;
748
749 /* can this be non-const buffer_index? how do we handle that? */
750 const_offset = nir_src_as_const_value(intr->src[0]);
751 compile_assert(ctx, const_offset);
752
753 offset = ir3_get_src(ctx, &intr->src[1])[0];
754
755 /* src0 is uvec2(offset*4, 0), src1 is offset.. nir already *= 4: */
756 src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
757 offset,
758 create_immed(b, 0),
759 }, 2);
760 src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
761
762 ldgb = ir3_LDGB(b, create_immed(b, const_offset->u32[0]), 0,
763 src0, 0, src1, 0);
764 ldgb->regs[0]->wrmask = MASK(intr->num_components);
765 ldgb->cat6.iim_val = intr->num_components;
766 ldgb->cat6.d = 4;
767 ldgb->cat6.type = TYPE_U32;
768 ldgb->barrier_class = IR3_BARRIER_BUFFER_R;
769 ldgb->barrier_conflict = IR3_BARRIER_BUFFER_W;
770
771 ir3_split_dest(b, dst, ldgb, 0, intr->num_components);
772 }
773
774 /* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
775 static void
776 emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
777 {
778 struct ir3_block *b = ctx->block;
779 struct ir3_instruction *stgb, *src0, *src1, *src2, *offset;
780 nir_const_value *const_offset;
781 /* TODO handle wrmask properly, see _store_shared().. but I think
782 * it is more a PITA than that, since blob ends up loading the
783 * masked components and writing them back out.
784 */
785 unsigned wrmask = intr->const_index[0];
786 unsigned ncomp = ffs(~wrmask) - 1;
787
788 /* can this be non-const buffer_index? how do we handle that? */
789 const_offset = nir_src_as_const_value(intr->src[1]);
790 compile_assert(ctx, const_offset);
791
792 offset = ir3_get_src(ctx, &intr->src[2])[0];
793
794 /* src0 is value, src1 is offset, src2 is uvec2(offset*4, 0)..
795 * nir already *= 4:
796 */
797 src0 = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
798 src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
799 src2 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
800 offset,
801 create_immed(b, 0),
802 }, 2);
803
804 stgb = ir3_STGB(b, create_immed(b, const_offset->u32[0]), 0,
805 src0, 0, src1, 0, src2, 0);
806 stgb->cat6.iim_val = ncomp;
807 stgb->cat6.d = 4;
808 stgb->cat6.type = TYPE_U32;
809 stgb->barrier_class = IR3_BARRIER_BUFFER_W;
810 stgb->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
811
812 array_insert(b, b->keeps, stgb);
813 }
814
815 /* src[] = { block_index } */
816 static void
817 emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
818 struct ir3_instruction **dst)
819 {
820 /* SSBO size stored as a const starting at ssbo_sizes: */
821 unsigned blk_idx = nir_src_as_const_value(intr->src[0])->u32[0];
822 unsigned idx = regid(ctx->so->constbase.ssbo_sizes, 0) +
823 ctx->so->const_layout.ssbo_size.off[blk_idx];
824
825 debug_assert(ctx->so->const_layout.ssbo_size.mask & (1 << blk_idx));
826
827 dst[0] = create_uniform(ctx->block, idx);
828 }
829
830 /*
831 * SSBO atomic intrinsics
832 *
833 * All of the SSBO atomic memory operations read a value from memory,
834 * compute a new value using one of the operations below, write the new
835 * value to memory, and return the original value read.
836 *
837 * All operations take 3 sources except CompSwap that takes 4. These
838 * sources represent:
839 *
840 * 0: The SSBO buffer index.
841 * 1: The offset into the SSBO buffer of the variable that the atomic
842 * operation will operate on.
843 * 2: The data parameter to the atomic function (i.e. the value to add
844 * in ssbo_atomic_add, etc).
845 * 3: For CompSwap only: the second data parameter.
846 */
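/* For example (illustrative): ssbo_atomic_add(ssbo=0, offset=16, data=x)
 * reads the uint at byte offset 16 of SSBO 0, writes back old + x, and
 * returns old.
 */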
847 static struct ir3_instruction *
848 emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
849 {
850 struct ir3_block *b = ctx->block;
851 struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *offset;
852 nir_const_value *const_offset;
853 type_t type = TYPE_U32;
854
855 /* can this be non-const buffer_index? how do we handle that? */
856 const_offset = nir_src_as_const_value(intr->src[0]);
857 compile_assert(ctx, const_offset);
858 ssbo = create_immed(b, const_offset->u32[0]);
859
860 offset = ir3_get_src(ctx, &intr->src[1])[0];
861
862 /* src0 is data (or uvec2(data, compare))
863 * src1 is offset
864 * src2 is uvec2(offset*4, 0) (appears to be 64b byte offset)
865 *
866 * Note that nir already multiplies the offset by four
867 */
868 src0 = ir3_get_src(ctx, &intr->src[2])[0];
869 src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
870 src2 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
871 offset,
872 create_immed(b, 0),
873 }, 2);
874
875 switch (intr->intrinsic) {
876 case nir_intrinsic_ssbo_atomic_add:
877 atomic = ir3_ATOMIC_ADD_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
878 break;
879 case nir_intrinsic_ssbo_atomic_imin:
880 atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
881 type = TYPE_S32;
882 break;
883 case nir_intrinsic_ssbo_atomic_umin:
884 atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
885 break;
886 case nir_intrinsic_ssbo_atomic_imax:
887 atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
888 type = TYPE_S32;
889 break;
890 case nir_intrinsic_ssbo_atomic_umax:
891 atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
892 break;
893 case nir_intrinsic_ssbo_atomic_and:
894 atomic = ir3_ATOMIC_AND_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
895 break;
896 case nir_intrinsic_ssbo_atomic_or:
897 atomic = ir3_ATOMIC_OR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
898 break;
899 case nir_intrinsic_ssbo_atomic_xor:
900 atomic = ir3_ATOMIC_XOR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
901 break;
902 case nir_intrinsic_ssbo_atomic_exchange:
903 atomic = ir3_ATOMIC_XCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
904 break;
905 case nir_intrinsic_ssbo_atomic_comp_swap:
906 /* for cmpxchg, src0 is [ui]vec2(data, compare): */
907 src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
908 ir3_get_src(ctx, &intr->src[3])[0],
909 src0,
910 }, 2);
911 atomic = ir3_ATOMIC_CMPXCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
912 break;
913 default:
914 unreachable("boo");
915 }
916
917 atomic->cat6.iim_val = 1;
918 atomic->cat6.d = 4;
919 atomic->cat6.type = type;
920 atomic->barrier_class = IR3_BARRIER_BUFFER_W;
921 atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
922
 923 /* even if nothing consumes the result, we can't DCE the instruction: */
924 array_insert(b, b->keeps, atomic);
925
926 return atomic;
927 }
928
929 /* src[] = { offset }. const_index[] = { base } */
930 static void
931 emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr,
932 struct ir3_instruction **dst)
933 {
934 struct ir3_block *b = ctx->block;
935 struct ir3_instruction *ldl, *offset;
936 unsigned base;
937
938 offset = ir3_get_src(ctx, &intr->src[0])[0];
939 base = nir_intrinsic_base(intr);
940
941 ldl = ir3_LDL(b, offset, 0, create_immed(b, intr->num_components), 0);
942 ldl->cat6.src_offset = base;
943 ldl->cat6.type = utype_dst(intr->dest);
944 ldl->regs[0]->wrmask = MASK(intr->num_components);
945
946 ldl->barrier_class = IR3_BARRIER_SHARED_R;
947 ldl->barrier_conflict = IR3_BARRIER_SHARED_W;
948
949 ir3_split_dest(b, dst, ldl, 0, intr->num_components);
950 }
951
952 /* src[] = { value, offset }. const_index[] = { base, write_mask } */
953 static void
954 emit_intrinsic_store_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
955 {
956 struct ir3_block *b = ctx->block;
957 struct ir3_instruction *stl, *offset;
958 struct ir3_instruction * const *value;
959 unsigned base, wrmask;
960
961 value = ir3_get_src(ctx, &intr->src[0]);
962 offset = ir3_get_src(ctx, &intr->src[1])[0];
963
964 base = nir_intrinsic_base(intr);
965 wrmask = nir_intrinsic_write_mask(intr);
966
967 /* Combine groups of consecutive enabled channels in one write
968 * message. We use ffs to find the first enabled channel and then ffs on
969 * the bit-inverse, down-shifted writemask to determine the length of
970 * the block of enabled bits.
971 *
972 * (trick stolen from i965's fs_visitor::nir_emit_cs_intrinsic())
973 */
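	/* Worked example (illustrative): wrmask = 0b1101
	 *   pass 1: first_component = 0, length = 1 -> write comp 0,
	 *           then wrmask &= (15 << 1) -> 0b1100
	 *   pass 2: first_component = 2, length = 2 -> write comps 2..3,
	 *           then wrmask &= (15 << 4) -> 0, done
	 */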
974 while (wrmask) {
975 unsigned first_component = ffs(wrmask) - 1;
976 unsigned length = ffs(~(wrmask >> first_component)) - 1;
977
978 stl = ir3_STL(b, offset, 0,
979 ir3_create_collect(ctx, &value[first_component], length), 0,
980 create_immed(b, length), 0);
981 stl->cat6.dst_offset = first_component + base;
982 stl->cat6.type = utype_src(intr->src[0]);
983 stl->barrier_class = IR3_BARRIER_SHARED_W;
984 stl->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
985
986 array_insert(b, b->keeps, stl);
987
988 /* Clear the bits in the writemask that we just wrote, then try
989 * again to see if more channels are left.
990 */
991 wrmask &= (15 << (first_component + length));
992 }
993 }
994
995 /*
996 * CS shared variable atomic intrinsics
997 *
998 * All of the shared variable atomic memory operations read a value from
999 * memory, compute a new value using one of the operations below, write the
1000 * new value to memory, and return the original value read.
1001 *
1002 * All operations take 2 sources except CompSwap that takes 3. These
1003 * sources represent:
1004 *
1005 * 0: The offset into the shared variable storage region that the atomic
1006 * operation will operate on.
1007 * 1: The data parameter to the atomic function (i.e. the value to add
1008 * in shared_atomic_add, etc).
1009 * 2: For CompSwap only: the second data parameter.
1010 */
1011 static struct ir3_instruction *
1012 emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1013 {
1014 struct ir3_block *b = ctx->block;
1015 struct ir3_instruction *atomic, *src0, *src1;
1016 type_t type = TYPE_U32;
1017
1018 src0 = ir3_get_src(ctx, &intr->src[0])[0]; /* offset */
1019 src1 = ir3_get_src(ctx, &intr->src[1])[0]; /* value */
1020
1021 switch (intr->intrinsic) {
1022 case nir_intrinsic_shared_atomic_add:
1023 atomic = ir3_ATOMIC_ADD(b, src0, 0, src1, 0);
1024 break;
1025 case nir_intrinsic_shared_atomic_imin:
1026 atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
1027 type = TYPE_S32;
1028 break;
1029 case nir_intrinsic_shared_atomic_umin:
1030 atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
1031 break;
1032 case nir_intrinsic_shared_atomic_imax:
1033 atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
1034 type = TYPE_S32;
1035 break;
1036 case nir_intrinsic_shared_atomic_umax:
1037 atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
1038 break;
1039 case nir_intrinsic_shared_atomic_and:
1040 atomic = ir3_ATOMIC_AND(b, src0, 0, src1, 0);
1041 break;
1042 case nir_intrinsic_shared_atomic_or:
1043 atomic = ir3_ATOMIC_OR(b, src0, 0, src1, 0);
1044 break;
1045 case nir_intrinsic_shared_atomic_xor:
1046 atomic = ir3_ATOMIC_XOR(b, src0, 0, src1, 0);
1047 break;
1048 case nir_intrinsic_shared_atomic_exchange:
1049 atomic = ir3_ATOMIC_XCHG(b, src0, 0, src1, 0);
1050 break;
1051 case nir_intrinsic_shared_atomic_comp_swap:
1052 /* for cmpxchg, src1 is [ui]vec2(data, compare): */
1053 src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
1054 ir3_get_src(ctx, &intr->src[2])[0],
1055 src1,
1056 }, 2);
1057 atomic = ir3_ATOMIC_CMPXCHG(b, src0, 0, src1, 0);
1058 break;
1059 default:
1060 unreachable("boo");
1061 }
1062
1063 atomic->cat6.iim_val = 1;
1064 atomic->cat6.d = 1;
1065 atomic->cat6.type = type;
1066 atomic->barrier_class = IR3_BARRIER_SHARED_W;
1067 atomic->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1068
 1069 /* even if nothing consumes the result, we can't DCE the instruction: */
1070 array_insert(b, b->keeps, atomic);
1071
1072 return atomic;
1073 }
1074
1075 /* Images get mapped into SSBO/image state (for store/atomic) and texture
1076 * state block (for load). To simplify things, invert the image id and
1077 * map it from end of state block, ie. image 0 becomes num-1, image 1
1078 * becomes num-2, etc. This potentially avoids needing to re-emit texture
1079 * state when switching shaders.
1080 *
 1081 * TODO is max # of samplers and SSBOs the same? This shouldn't be hard-
 1082 * coded. Also, since all the gl shader stages (ie. everything but CS)
1083 * share the same SSBO/image state block, this might require some more
1084 * logic if we supported images in anything other than FS..
1085 */
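/* Worked example (illustrative): with max_samplers = 16, image 0 maps
 * to slot 15 and image 1 to slot 14; an array element imgs[2] with
 * driver_location 3 gives loc = 2 + 3 = 5, i.e. slot 16 - 5 - 1 = 10.
 */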
1086 static unsigned
1087 get_image_slot(struct ir3_context *ctx, nir_deref_instr *deref)
1088 {
1089 unsigned int loc = 0;
1090 unsigned inner_size = 1;
1091
1092 while (deref->deref_type != nir_deref_type_var) {
1093 assert(deref->deref_type == nir_deref_type_array);
1094 nir_const_value *const_index = nir_src_as_const_value(deref->arr.index);
1095 assert(const_index);
1096
1097 /* Go to the next instruction */
1098 deref = nir_deref_instr_parent(deref);
1099
1100 assert(glsl_type_is_array(deref->type));
1101 const unsigned array_len = glsl_get_length(deref->type);
1102 loc += MIN2(const_index->u32[0], array_len - 1) * inner_size;
1103
1104 /* Update the inner size */
1105 inner_size *= array_len;
1106 }
1107
1108 loc += deref->var->data.driver_location;
1109
1110 /* TODO figure out real limit per generation, and don't hardcode: */
1111 const unsigned max_samplers = 16;
1112 return max_samplers - loc - 1;
1113 }
1114
1115 /* see tex_info() for equiv logic for texture instructions.. it would be
1116 * nice if this could be better unified..
1117 */
1118 static unsigned
1119 get_image_coords(const nir_variable *var, unsigned *flagsp)
1120 {
1121 const struct glsl_type *type = glsl_without_array(var->type);
1122 unsigned coords, flags = 0;
1123
1124 switch (glsl_get_sampler_dim(type)) {
1125 case GLSL_SAMPLER_DIM_1D:
1126 case GLSL_SAMPLER_DIM_BUF:
1127 coords = 1;
1128 break;
1129 case GLSL_SAMPLER_DIM_2D:
1130 case GLSL_SAMPLER_DIM_RECT:
1131 case GLSL_SAMPLER_DIM_EXTERNAL:
1132 case GLSL_SAMPLER_DIM_MS:
1133 coords = 2;
1134 break;
1135 case GLSL_SAMPLER_DIM_3D:
1136 case GLSL_SAMPLER_DIM_CUBE:
1137 flags |= IR3_INSTR_3D;
1138 coords = 3;
1139 break;
1140 default:
1141 unreachable("bad sampler dim");
1142 return 0;
1143 }
1144
1145 if (glsl_sampler_type_is_array(type)) {
1146 /* note: unlike tex_info(), adjust # of coords to include array idx: */
1147 coords++;
1148 flags |= IR3_INSTR_A;
1149 }
1150
1151 if (flagsp)
1152 *flagsp = flags;
1153
1154 return coords;
1155 }
1156
1157 static type_t
1158 get_image_type(const nir_variable *var)
1159 {
1160 switch (glsl_get_sampler_result_type(glsl_without_array(var->type))) {
1161 case GLSL_TYPE_UINT:
1162 return TYPE_U32;
1163 case GLSL_TYPE_INT:
1164 return TYPE_S32;
1165 case GLSL_TYPE_FLOAT:
1166 return TYPE_F32;
1167 default:
1168 unreachable("bad sampler type.");
1169 return 0;
1170 }
1171 }
1172
1173 static struct ir3_instruction *
1174 get_image_offset(struct ir3_context *ctx, const nir_variable *var,
1175 struct ir3_instruction * const *coords, bool byteoff)
1176 {
1177 struct ir3_block *b = ctx->block;
1178 struct ir3_instruction *offset;
1179 unsigned ncoords = get_image_coords(var, NULL);
1180
1181 /* to calculate the byte offset (yes, uggg) we need (up to) three
1182 * const values to know the bytes per pixel, and y and z stride:
1183 */
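	/* i.e. (illustrative): offset = x * bpp + y * y_pitch + z * z_pitch,
	 * with bpp/y_pitch/z_pitch taken from the image_dims consts below:
	 */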
1184 unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
1185 ctx->so->const_layout.image_dims.off[var->data.driver_location];
1186
1187 debug_assert(ctx->so->const_layout.image_dims.mask &
1188 (1 << var->data.driver_location));
1189
1190 /* offset = coords.x * bytes_per_pixel: */
1191 offset = ir3_MUL_S(b, coords[0], 0, create_uniform(b, cb + 0), 0);
1192 if (ncoords > 1) {
1193 /* offset += coords.y * y_pitch: */
1194 offset = ir3_MAD_S24(b, create_uniform(b, cb + 1), 0,
1195 coords[1], 0, offset, 0);
1196 }
1197 if (ncoords > 2) {
1198 /* offset += coords.z * z_pitch: */
1199 offset = ir3_MAD_S24(b, create_uniform(b, cb + 2), 0,
1200 coords[2], 0, offset, 0);
1201 }
1202
1203 if (!byteoff) {
1204 /* Some cases, like atomics, seem to use dword offset instead
1205 * of byte offsets.. blob just puts an extra shr.b in there
1206 * in those cases:
1207 */
1208 offset = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
1209 }
1210
1211 return ir3_create_collect(ctx, (struct ir3_instruction*[]){
1212 offset,
1213 create_immed(b, 0),
1214 }, 2);
1215 }
1216
1217 /* src[] = { deref, coord, sample_index }. const_index[] = {} */
1218 static void
1219 emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1220 struct ir3_instruction **dst)
1221 {
1222 struct ir3_block *b = ctx->block;
1223 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
1224 struct ir3_instruction *sam;
1225 struct ir3_instruction * const *src0 = ir3_get_src(ctx, &intr->src[1]);
1226 struct ir3_instruction *coords[4];
1227 unsigned flags, ncoords = get_image_coords(var, &flags);
1228 unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
1229 type_t type = get_image_type(var);
1230
1231 /* hmm, this seems a bit odd, but it is what blob does and (at least
1232 * a5xx) just faults on bogus addresses otherwise:
1233 */
1234 if (flags & IR3_INSTR_3D) {
1235 flags &= ~IR3_INSTR_3D;
1236 flags |= IR3_INSTR_A;
1237 }
1238
1239 for (unsigned i = 0; i < ncoords; i++)
1240 coords[i] = src0[i];
1241
1242 if (ncoords == 1)
1243 coords[ncoords++] = create_immed(b, 0);
1244
1245 sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags,
1246 tex_idx, tex_idx, ir3_create_collect(ctx, coords, ncoords), NULL);
1247
1248 sam->barrier_class = IR3_BARRIER_IMAGE_R;
1249 sam->barrier_conflict = IR3_BARRIER_IMAGE_W;
1250
1251 ir3_split_dest(b, dst, sam, 0, 4);
1252 }
1253
1254 /* Returns the number of components for the different image formats
1255 * supported by the GLES 3.1 spec, plus those added by the
1256 * GL_NV_image_formats extension.
1257 */
1258 static unsigned
1259 get_num_components_for_glformat(GLuint format)
1260 {
1261 switch (format) {
1262 case GL_R32F:
1263 case GL_R32I:
1264 case GL_R32UI:
1265 case GL_R16F:
1266 case GL_R16I:
1267 case GL_R16UI:
1268 case GL_R16:
1269 case GL_R16_SNORM:
1270 case GL_R8I:
1271 case GL_R8UI:
1272 case GL_R8:
1273 case GL_R8_SNORM:
1274 return 1;
1275
1276 case GL_RG32F:
1277 case GL_RG32I:
1278 case GL_RG32UI:
1279 case GL_RG16F:
1280 case GL_RG16I:
1281 case GL_RG16UI:
1282 case GL_RG16:
1283 case GL_RG16_SNORM:
1284 case GL_RG8I:
1285 case GL_RG8UI:
1286 case GL_RG8:
1287 case GL_RG8_SNORM:
1288 return 2;
1289
1290 case GL_R11F_G11F_B10F:
1291 return 3;
1292
1293 case GL_RGBA32F:
1294 case GL_RGBA32I:
1295 case GL_RGBA32UI:
1296 case GL_RGBA16F:
1297 case GL_RGBA16I:
1298 case GL_RGBA16UI:
1299 case GL_RGBA16:
1300 case GL_RGBA16_SNORM:
1301 case GL_RGBA8I:
1302 case GL_RGBA8UI:
1303 case GL_RGBA8:
1304 case GL_RGBA8_SNORM:
1305 case GL_RGB10_A2UI:
1306 case GL_RGB10_A2:
1307 return 4;
1308
1309 default:
1310 /* Return 4 components also for all other formats we don't know
1311 * about. This is always safe. Also, the format should have been
1312 * validated already by the higher level API. Drop a debug message
1313 * just in case.
1314 */
1315 debug_printf("Unhandled GL format %u while emitting imageStore()\n",
1316 format);
1317 return 4;
1318 }
1319 }
1320
1321 /* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
1322 static void
1323 emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1324 {
1325 struct ir3_block *b = ctx->block;
1326 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
1327 struct ir3_instruction *stib, *offset;
1328 struct ir3_instruction * const *value = ir3_get_src(ctx, &intr->src[3]);
1329 struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
1330 unsigned ncoords = get_image_coords(var, NULL);
1331 unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
1332 unsigned ncomp = get_num_components_for_glformat(var->data.image.format);
1333
1334 /* src0 is value
1335 * src1 is coords
1336 * src2 is 64b byte offset
1337 */
1338
1339 offset = get_image_offset(ctx, var, coords, true);
1340
1341 /* NOTE: stib seems to take byte offset, but stgb.typed can be used
1342 * too and takes a dword offset.. not quite sure yet why blob uses
1343 * one over the other in various cases.
1344 */
1345
1346 stib = ir3_STIB(b, create_immed(b, tex_idx), 0,
1347 ir3_create_collect(ctx, value, ncomp), 0,
1348 ir3_create_collect(ctx, coords, ncoords), 0,
1349 offset, 0);
1350 stib->cat6.iim_val = ncomp;
1351 stib->cat6.d = ncoords;
1352 stib->cat6.type = get_image_type(var);
1353 stib->cat6.typed = true;
1354 stib->barrier_class = IR3_BARRIER_IMAGE_W;
1355 stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
1356
1357 array_insert(b, b->keeps, stib);
1358 }
1359
1360 static void
1361 emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1362 struct ir3_instruction **dst)
1363 {
1364 struct ir3_block *b = ctx->block;
1365 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
1366 unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
1367 struct ir3_instruction *sam, *lod;
1368 unsigned flags, ncoords = get_image_coords(var, &flags);
1369
1370 lod = create_immed(b, 0);
1371 sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
1372 tex_idx, tex_idx, lod, NULL);
1373
1374 /* Array size actually ends up in .w rather than .z. This doesn't
1375 * matter for miplevel 0, but for higher mips the value in z is
1376 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
1377 * returned, which means that we have to add 1 to it for arrays for
1378 * a3xx.
1379 *
 1380 * Note: use a temporary dst and then copy, since the size of the dst
 1381 * array that is passed in is based on nir's understanding of the
 1382 * result size, not the hardware's.
1383 */
1384 struct ir3_instruction *tmp[4];
1385
1386 ir3_split_dest(b, tmp, sam, 0, 4);
1387
1388 /* get_size instruction returns size in bytes instead of texels
1389 * for imageBuffer, so we need to divide it by the pixel size
1390 * of the image format.
1391 *
1392 * TODO: This is at least true on a5xx. Check other gens.
1393 */
1394 enum glsl_sampler_dim dim =
1395 glsl_get_sampler_dim(glsl_without_array(var->type));
1396 if (dim == GLSL_SAMPLER_DIM_BUF) {
1397 /* Since all the possible values the divisor can take are
1398 * power-of-two (4, 8, or 16), the division is implemented
1399 * as a shift-right.
1400 * During shader setup, the log2 of the image format's
1401 * bytes-per-pixel should have been emitted in 2nd slot of
1402 * image_dims. See ir3_shader::emit_image_dims().
1403 */
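		/* For example (illustrative): an RGBA32F imageBuffer has
		 * 16 bytes per texel, so the 2nd image_dims slot holds
		 * log2(16) == 4 and texels = bytes >> 4.
		 */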
1404 unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
1405 ctx->so->const_layout.image_dims.off[var->data.driver_location];
1406 struct ir3_instruction *aux = create_uniform(b, cb + 1);
1407
1408 tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0);
1409 }
1410
1411 for (unsigned i = 0; i < ncoords; i++)
1412 dst[i] = tmp[i];
1413
1414 if (flags & IR3_INSTR_A) {
1415 if (ctx->compiler->levels_add_one) {
1416 dst[ncoords-1] = ir3_ADD_U(b, tmp[3], 0, create_immed(b, 1), 0);
1417 } else {
1418 dst[ncoords-1] = ir3_MOV(b, tmp[3], TYPE_U32);
1419 }
1420 }
1421 }
1422
1423 /* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
1424 static struct ir3_instruction *
1425 emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1426 {
1427 struct ir3_block *b = ctx->block;
1428 const nir_variable *var = nir_intrinsic_get_var(intr, 0);
1429 struct ir3_instruction *atomic, *image, *src0, *src1, *src2;
1430 struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
1431 unsigned ncoords = get_image_coords(var, NULL);
1432
1433 image = create_immed(b, get_image_slot(ctx, nir_src_as_deref(intr->src[0])));
1434
1435 /* src0 is value (or uvec2(value, compare))
1436 * src1 is coords
1437 * src2 is 64b byte offset
1438 */
1439 src0 = ir3_get_src(ctx, &intr->src[3])[0];
1440 src1 = ir3_create_collect(ctx, coords, ncoords);
1441 src2 = get_image_offset(ctx, var, coords, false);
1442
1443 switch (intr->intrinsic) {
1444 case nir_intrinsic_image_deref_atomic_add:
1445 atomic = ir3_ATOMIC_ADD_G(b, image, 0, src0, 0, src1, 0, src2, 0);
1446 break;
1447 case nir_intrinsic_image_deref_atomic_min:
1448 atomic = ir3_ATOMIC_MIN_G(b, image, 0, src0, 0, src1, 0, src2, 0);
1449 break;
1450 case nir_intrinsic_image_deref_atomic_max:
1451 atomic = ir3_ATOMIC_MAX_G(b, image, 0, src0, 0, src1, 0, src2, 0);
1452 break;
1453 case nir_intrinsic_image_deref_atomic_and:
1454 atomic = ir3_ATOMIC_AND_G(b, image, 0, src0, 0, src1, 0, src2, 0);
1455 break;
1456 case nir_intrinsic_image_deref_atomic_or:
1457 atomic = ir3_ATOMIC_OR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
1458 break;
1459 case nir_intrinsic_image_deref_atomic_xor:
1460 atomic = ir3_ATOMIC_XOR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
1461 break;
1462 case nir_intrinsic_image_deref_atomic_exchange:
1463 atomic = ir3_ATOMIC_XCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
1464 break;
1465 case nir_intrinsic_image_deref_atomic_comp_swap:
1466 /* for cmpxchg, src0 is [ui]vec2(data, compare): */
1467 src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
1468 ir3_get_src(ctx, &intr->src[4])[0],
1469 src0,
1470 }, 2);
1471 atomic = ir3_ATOMIC_CMPXCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
1472 break;
1473 default:
1474 unreachable("boo");
1475 }
1476
1477 atomic->cat6.iim_val = 1;
1478 atomic->cat6.d = ncoords;
1479 atomic->cat6.type = get_image_type(var);
1480 atomic->cat6.typed = true;
1481 atomic->barrier_class = IR3_BARRIER_IMAGE_W;
1482 atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
1483
 1484 /* even if nothing consumes the result, we can't DCE the instruction: */
1485 array_insert(b, b->keeps, atomic);
1486
1487 return atomic;
1488 }
1489
1490 static void
1491 emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1492 {
1493 struct ir3_block *b = ctx->block;
1494 struct ir3_instruction *barrier;
1495
1496 switch (intr->intrinsic) {
1497 case nir_intrinsic_barrier:
1498 barrier = ir3_BAR(b);
1499 barrier->cat7.g = true;
1500 barrier->cat7.l = true;
1501 barrier->flags = IR3_INSTR_SS | IR3_INSTR_SY;
1502 barrier->barrier_class = IR3_BARRIER_EVERYTHING;
1503 break;
1504 case nir_intrinsic_memory_barrier:
1505 barrier = ir3_FENCE(b);
1506 barrier->cat7.g = true;
1507 barrier->cat7.r = true;
1508 barrier->cat7.w = true;
1509 barrier->barrier_class = IR3_BARRIER_IMAGE_W |
1510 IR3_BARRIER_BUFFER_W;
1511 barrier->barrier_conflict =
1512 IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
1513 IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
1514 break;
1515 case nir_intrinsic_memory_barrier_atomic_counter:
1516 case nir_intrinsic_memory_barrier_buffer:
1517 barrier = ir3_FENCE(b);
1518 barrier->cat7.g = true;
1519 barrier->cat7.r = true;
1520 barrier->cat7.w = true;
1521 barrier->barrier_class = IR3_BARRIER_BUFFER_W;
1522 barrier->barrier_conflict = IR3_BARRIER_BUFFER_R |
1523 IR3_BARRIER_BUFFER_W;
1524 break;
1525 case nir_intrinsic_memory_barrier_image:
1526 // TODO double check if this should have .g set
1527 barrier = ir3_FENCE(b);
1528 barrier->cat7.g = true;
1529 barrier->cat7.r = true;
1530 barrier->cat7.w = true;
1531 barrier->barrier_class = IR3_BARRIER_IMAGE_W;
1532 barrier->barrier_conflict = IR3_BARRIER_IMAGE_R |
1533 IR3_BARRIER_IMAGE_W;
1534 break;
1535 case nir_intrinsic_memory_barrier_shared:
1536 barrier = ir3_FENCE(b);
1537 barrier->cat7.g = true;
1538 barrier->cat7.l = true;
1539 barrier->cat7.r = true;
1540 barrier->cat7.w = true;
1541 barrier->barrier_class = IR3_BARRIER_SHARED_W;
1542 barrier->barrier_conflict = IR3_BARRIER_SHARED_R |
1543 IR3_BARRIER_SHARED_W;
1544 break;
1545 case nir_intrinsic_group_memory_barrier:
1546 barrier = ir3_FENCE(b);
1547 barrier->cat7.g = true;
1548 barrier->cat7.l = true;
1549 barrier->cat7.r = true;
1550 barrier->cat7.w = true;
1551 barrier->barrier_class = IR3_BARRIER_SHARED_W |
1552 IR3_BARRIER_IMAGE_W |
1553 IR3_BARRIER_BUFFER_W;
1554 barrier->barrier_conflict =
1555 IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W |
1556 IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
1557 IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
1558 break;
1559 default:
1560 unreachable("boo");
1561 }
1562
1563 /* make sure barrier doesn't get DCE'd */
1564 array_insert(b, b->keeps, barrier);
1565 }
1566
1567 static void add_sysval_input_compmask(struct ir3_context *ctx,
1568 gl_system_value slot, unsigned compmask,
1569 struct ir3_instruction *instr)
1570 {
1571 struct ir3_shader_variant *so = ctx->so;
1572 unsigned r = regid(so->inputs_count, 0);
1573 unsigned n = so->inputs_count++;
1574
1575 so->inputs[n].sysval = true;
1576 so->inputs[n].slot = slot;
1577 so->inputs[n].compmask = compmask;
1578 so->inputs[n].regid = r;
1579 so->inputs[n].interpolate = INTERP_MODE_FLAT;
1580 so->total_in++;
1581
1582 ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1);
1583 ctx->ir->inputs[r] = instr;
1584 }
1585
1586 static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot,
1587 struct ir3_instruction *instr)
1588 {
1589 add_sysval_input_compmask(ctx, slot, 0x1, instr);
1590 }
1591
1592 static void
1593 emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1594 {
1595 const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
1596 struct ir3_instruction **dst;
1597 struct ir3_instruction * const *src;
1598 struct ir3_block *b = ctx->block;
1599 nir_const_value *const_offset;
1600 int idx, comp;
1601
1602 if (info->has_dest) {
1603 unsigned n = nir_intrinsic_dest_components(intr);
1604 dst = ir3_get_dst(ctx, &intr->dest, n);
1605 } else {
1606 dst = NULL;
1607 }
1608
1609 switch (intr->intrinsic) {
1610 case nir_intrinsic_load_uniform:
1611 idx = nir_intrinsic_base(intr);
1612 const_offset = nir_src_as_const_value(intr->src[0]);
1613 if (const_offset) {
1614 idx += const_offset->u32[0];
1615 for (int i = 0; i < intr->num_components; i++) {
1616 unsigned n = idx * 4 + i;
1617 dst[i] = create_uniform(b, n);
1618 }
1619 } else {
1620 src = ir3_get_src(ctx, &intr->src[0]);
1621 for (int i = 0; i < intr->num_components; i++) {
1622 int n = idx * 4 + i;
1623 dst[i] = create_uniform_indirect(b, n,
1624 ir3_get_addr(ctx, src[0], 4));
1625 }
1626 /* NOTE: if relative addressing is used, we set
1627 * constlen in the compiler (to worst-case value)
1628 * since we don't know in the assembler what the max
1629 * addr reg value can be:
1630 */
1631 ctx->so->constlen = ctx->s->num_uniforms;
1632 }
1633 break;
1634 case nir_intrinsic_load_ubo:
1635 emit_intrinsic_load_ubo(ctx, intr, dst);
1636 break;
1637 case nir_intrinsic_load_input:
1638 idx = nir_intrinsic_base(intr);
1639 comp = nir_intrinsic_component(intr);
1640 const_offset = nir_src_as_const_value(intr->src[0]);
1641 if (const_offset) {
1642 idx += const_offset->u32[0];
1643 for (int i = 0; i < intr->num_components; i++) {
1644 unsigned n = idx * 4 + i + comp;
1645 dst[i] = ctx->ir->inputs[n];
1646 }
1647 } else {
1648 src = ir3_get_src(ctx, &intr->src[0]);
1649 struct ir3_instruction *collect =
1650 ir3_create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs);
1651 struct ir3_instruction *addr = ir3_get_addr(ctx, src[0], 4);
1652 for (int i = 0; i < intr->num_components; i++) {
1653 unsigned n = idx * 4 + i + comp;
1654 dst[i] = create_indirect_load(ctx, ctx->ir->ninputs,
1655 n, addr, collect);
1656 }
1657 }
1658 break;
1659 case nir_intrinsic_load_ssbo:
1660 emit_intrinsic_load_ssbo(ctx, intr, dst);
1661 break;
1662 case nir_intrinsic_store_ssbo:
1663 emit_intrinsic_store_ssbo(ctx, intr);
1664 break;
1665 case nir_intrinsic_get_buffer_size:
1666 emit_intrinsic_ssbo_size(ctx, intr, dst);
1667 break;
1668 case nir_intrinsic_ssbo_atomic_add:
1669 case nir_intrinsic_ssbo_atomic_imin:
1670 case nir_intrinsic_ssbo_atomic_umin:
1671 case nir_intrinsic_ssbo_atomic_imax:
	case nir_intrinsic_ssbo_atomic_umax:
	case nir_intrinsic_ssbo_atomic_and:
	case nir_intrinsic_ssbo_atomic_or:
	case nir_intrinsic_ssbo_atomic_xor:
	case nir_intrinsic_ssbo_atomic_exchange:
	case nir_intrinsic_ssbo_atomic_comp_swap:
		dst[0] = emit_intrinsic_atomic_ssbo(ctx, intr);
		break;
	case nir_intrinsic_load_shared:
		emit_intrinsic_load_shared(ctx, intr, dst);
		break;
	case nir_intrinsic_store_shared:
		emit_intrinsic_store_shared(ctx, intr);
		break;
	case nir_intrinsic_shared_atomic_add:
	case nir_intrinsic_shared_atomic_imin:
	case nir_intrinsic_shared_atomic_umin:
	case nir_intrinsic_shared_atomic_imax:
	case nir_intrinsic_shared_atomic_umax:
	case nir_intrinsic_shared_atomic_and:
	case nir_intrinsic_shared_atomic_or:
	case nir_intrinsic_shared_atomic_xor:
	case nir_intrinsic_shared_atomic_exchange:
	case nir_intrinsic_shared_atomic_comp_swap:
		dst[0] = emit_intrinsic_atomic_shared(ctx, intr);
		break;
	case nir_intrinsic_image_deref_load:
		emit_intrinsic_load_image(ctx, intr, dst);
		break;
	case nir_intrinsic_image_deref_store:
		emit_intrinsic_store_image(ctx, intr);
		break;
	case nir_intrinsic_image_deref_size:
		emit_intrinsic_image_size(ctx, intr, dst);
		break;
	case nir_intrinsic_image_deref_atomic_add:
	case nir_intrinsic_image_deref_atomic_min:
	case nir_intrinsic_image_deref_atomic_max:
	case nir_intrinsic_image_deref_atomic_and:
	case nir_intrinsic_image_deref_atomic_or:
	case nir_intrinsic_image_deref_atomic_xor:
	case nir_intrinsic_image_deref_atomic_exchange:
	case nir_intrinsic_image_deref_atomic_comp_swap:
		dst[0] = emit_intrinsic_atomic_image(ctx, intr);
		break;
	case nir_intrinsic_barrier:
	case nir_intrinsic_memory_barrier:
	case nir_intrinsic_group_memory_barrier:
	case nir_intrinsic_memory_barrier_atomic_counter:
	case nir_intrinsic_memory_barrier_buffer:
	case nir_intrinsic_memory_barrier_image:
	case nir_intrinsic_memory_barrier_shared:
		emit_intrinsic_barrier(ctx, intr);
		/* note that the blk ptr is no longer valid, make that obvious: */
		b = NULL;
		break;
	case nir_intrinsic_store_output:
		idx = nir_intrinsic_base(intr);
		comp = nir_intrinsic_component(intr);
		const_offset = nir_src_as_const_value(intr->src[1]);
		compile_assert(ctx, const_offset != NULL);
		idx += const_offset->u32[0];

		src = ir3_get_src(ctx, &intr->src[0]);
		for (int i = 0; i < intr->num_components; i++) {
			unsigned n = idx * 4 + i + comp;
			ctx->ir->outputs[n] = src[i];
		}
		break;
	case nir_intrinsic_load_base_vertex:
	case nir_intrinsic_load_first_vertex:
		if (!ctx->basevertex) {
			ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE);
			add_sysval_input(ctx, SYSTEM_VALUE_FIRST_VERTEX, ctx->basevertex);
		}
		dst[0] = ctx->basevertex;
		break;
	case nir_intrinsic_load_vertex_id_zero_base:
	case nir_intrinsic_load_vertex_id:
		if (!ctx->vertex_id) {
			gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ?
				SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
			ctx->vertex_id = create_input(ctx, 0);
			add_sysval_input(ctx, sv, ctx->vertex_id);
		}
		dst[0] = ctx->vertex_id;
		break;
	case nir_intrinsic_load_instance_id:
		if (!ctx->instance_id) {
			ctx->instance_id = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID,
					ctx->instance_id);
		}
		dst[0] = ctx->instance_id;
		break;
	case nir_intrinsic_load_sample_id:
	case nir_intrinsic_load_sample_id_no_per_sample:
		if (!ctx->samp_id) {
			ctx->samp_id = create_input(ctx, 0);
			ctx->samp_id->regs[0]->flags |= IR3_REG_HALF;
			add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID,
					ctx->samp_id);
		}
		dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32);
		break;
	case nir_intrinsic_load_sample_mask_in:
		if (!ctx->samp_mask_in) {
			ctx->samp_mask_in = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN,
					ctx->samp_mask_in);
		}
		dst[0] = ctx->samp_mask_in;
		break;
	case nir_intrinsic_load_user_clip_plane:
		idx = nir_intrinsic_ucp_id(intr);
		for (int i = 0; i < intr->num_components; i++) {
			unsigned n = idx * 4 + i;
			dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
		}
		break;
	case nir_intrinsic_load_front_face:
		if (!ctx->frag_face) {
			ctx->so->frag_face = true;
			ctx->frag_face = create_input(ctx, 0);
			add_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, ctx->frag_face);
			ctx->frag_face->regs[0]->flags |= IR3_REG_HALF;
		}
		/* for fragface, we get -1 for back and 0 for front. However this is
		 * the inverse of what nir expects (where ~0 is true).
		 */
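		/* A worked example (sketch): back face gives -1, which the
		 * s16->s32 cov keeps as 0xffffffff, and not.b then yields 0
		 * (false).  Front face gives 0, which not.b turns into
		 * 0xffffffff (true), matching nir's ~0-is-true convention.
		 */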
		dst[0] = ir3_COV(b, ctx->frag_face, TYPE_S16, TYPE_S32);
		dst[0] = ir3_NOT_B(b, dst[0], 0);
		break;
	case nir_intrinsic_load_local_invocation_id:
		if (!ctx->local_invocation_id) {
			ctx->local_invocation_id = create_input_compmask(ctx, 0, 0x7);
			add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID,
					0x7, ctx->local_invocation_id);
		}
		ir3_split_dest(b, dst, ctx->local_invocation_id, 0, 3);
		break;
	case nir_intrinsic_load_work_group_id:
		if (!ctx->work_group_id) {
			ctx->work_group_id = create_input_compmask(ctx, 0, 0x7);
			add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID,
					0x7, ctx->work_group_id);
			ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH;
		}
		ir3_split_dest(b, dst, ctx->work_group_id, 0, 3);
		break;
	case nir_intrinsic_load_num_work_groups:
		for (int i = 0; i < intr->num_components; i++) {
			dst[i] = create_driver_param(ctx, IR3_DP_NUM_WORK_GROUPS_X + i);
		}
		break;
	case nir_intrinsic_load_local_group_size:
		for (int i = 0; i < intr->num_components; i++) {
			dst[i] = create_driver_param(ctx, IR3_DP_LOCAL_GROUP_SIZE_X + i);
		}
		break;
	case nir_intrinsic_discard_if:
	case nir_intrinsic_discard: {
		struct ir3_instruction *cond, *kill;

		if (intr->intrinsic == nir_intrinsic_discard_if) {
			/* conditional discard: */
			src = ir3_get_src(ctx, &intr->src[0]);
			cond = ir3_b2n(b, src[0]);
		} else {
			/* unconditional discard: */
			cond = create_immed(b, 1);
		}

		/* NOTE: only cmps.*.* can write p0.x: */
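		/* e.g. an unconditional discard still materializes cond as the
		 * immediate 1, so both paths share the same compare-to-p0.x
		 * plus kill sequence below (roughly cmps.s.ne into p0.x):
		 */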
		cond = ir3_CMPS_S(b, cond, 0, create_immed(b, 0), 0);
		cond->cat2.condition = IR3_COND_NE;

		/* condition always goes in predicate register: */
		cond->regs[0]->num = regid(REG_P0, 0);

		kill = ir3_KILL(b, cond, 0);
		array_insert(ctx->ir, ctx->ir->predicates, kill);

		array_insert(b, b->keeps, kill);
		ctx->so->has_kill = true;

		break;
	}
	default:
		ir3_context_error(ctx, "Unhandled intrinsic type: %s\n",
				nir_intrinsic_infos[intr->intrinsic].name);
		break;
	}

	if (info->has_dest)
		put_dst(ctx, &intr->dest);
}

static void
emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
{
	struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &instr->def,
			instr->def.num_components);
	type_t type = (instr->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;

	for (int i = 0; i < instr->def.num_components; i++)
		dst[i] = create_immed_typed(ctx->block, instr->value.u32[i], type);
}

static void
emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
{
	struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &undef->def,
			undef->def.num_components);
	type_t type = (undef->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;

	/* backend doesn't want undefined instructions, so just plug
	 * in 0.0..
	 */
	for (int i = 0; i < undef->def.num_components; i++)
		dst[i] = create_immed_typed(ctx->block, fui(0.0), type);
}

/*
 * texture fetch/sample instructions:
 */

static void
tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
{
	unsigned coords, flags = 0;

	/* note: we would use tex->coord_components, except that doesn't
	 * work for txs.. also, since the array index goes after the shadow
	 * ref, we don't want to count it:
	 */
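	/* For example (sketch): a 2d-array shadow lookup has three nir coord
	 * components (x, y, layer) plus a separate comparator, but here it
	 * yields coords=2, with IR3_INSTR_A and IR3_INSTR_S flagged instead.
	 */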
	switch (tex->sampler_dim) {
	case GLSL_SAMPLER_DIM_1D:
	case GLSL_SAMPLER_DIM_BUF:
		coords = 1;
		break;
	case GLSL_SAMPLER_DIM_2D:
	case GLSL_SAMPLER_DIM_RECT:
	case GLSL_SAMPLER_DIM_EXTERNAL:
	case GLSL_SAMPLER_DIM_MS:
		coords = 2;
		break;
	case GLSL_SAMPLER_DIM_3D:
	case GLSL_SAMPLER_DIM_CUBE:
		coords = 3;
		flags |= IR3_INSTR_3D;
		break;
	default:
		unreachable("bad sampler_dim");
	}

	if (tex->is_shadow && tex->op != nir_texop_lod)
		flags |= IR3_INSTR_S;

	if (tex->is_array && tex->op != nir_texop_lod)
		flags |= IR3_INSTR_A;

	*flagsp = flags;
	*coordsp = coords;
}

static void
emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction **dst, *sam, *src0[12], *src1[4];
	struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
	struct ir3_instruction *lod, *compare, *proj, *sample_index;
	bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
	unsigned i, coords, flags;
	unsigned nsrc0 = 0, nsrc1 = 0;
	type_t type;
	opc_t opc = 0;

	coord = off = ddx = ddy = NULL;
	lod = proj = compare = sample_index = NULL;

	/* TODO: might just be one component for gathers? */
	dst = ir3_get_dst(ctx, &tex->dest, 4);

	for (unsigned i = 0; i < tex->num_srcs; i++) {
		switch (tex->src[i].src_type) {
		case nir_tex_src_coord:
			coord = ir3_get_src(ctx, &tex->src[i].src);
			break;
		case nir_tex_src_bias:
			lod = ir3_get_src(ctx, &tex->src[i].src)[0];
			has_bias = true;
			break;
		case nir_tex_src_lod:
			lod = ir3_get_src(ctx, &tex->src[i].src)[0];
			has_lod = true;
			break;
		case nir_tex_src_comparator: /* shadow comparator */
			compare = ir3_get_src(ctx, &tex->src[i].src)[0];
			break;
		case nir_tex_src_projector:
			proj = ir3_get_src(ctx, &tex->src[i].src)[0];
			has_proj = true;
			break;
		case nir_tex_src_offset:
			off = ir3_get_src(ctx, &tex->src[i].src);
			has_off = true;
			break;
		case nir_tex_src_ddx:
			ddx = ir3_get_src(ctx, &tex->src[i].src);
			break;
		case nir_tex_src_ddy:
			ddy = ir3_get_src(ctx, &tex->src[i].src);
			break;
		case nir_tex_src_ms_index:
			sample_index = ir3_get_src(ctx, &tex->src[i].src)[0];
			break;
		default:
			ir3_context_error(ctx, "Unhandled NIR tex src type: %d\n",
					tex->src[i].src_type);
			return;
		}
	}

	switch (tex->op) {
	case nir_texop_tex:    opc = has_lod ? OPC_SAML : OPC_SAM; break;
	case nir_texop_txb:    opc = OPC_SAMB;     break;
	case nir_texop_txl:    opc = OPC_SAML;     break;
	case nir_texop_txd:    opc = OPC_SAMGQ;    break;
	case nir_texop_txf:    opc = OPC_ISAML;    break;
	case nir_texop_lod:    opc = OPC_GETLOD;   break;
	case nir_texop_tg4:
		/* NOTE: a4xx might need to emulate gather w/ txf (this is
		 * what the blob does; gather seems broken?), and a3xx did
		 * not support it (but could probably also emulate it).
		 */
		switch (tex->component) {
		case 0:            opc = OPC_GATHER4R; break;
		case 1:            opc = OPC_GATHER4G; break;
		case 2:            opc = OPC_GATHER4B; break;
		case 3:            opc = OPC_GATHER4A; break;
		}
		break;
	case nir_texop_txf_ms: opc = OPC_ISAMM;    break;
	case nir_texop_txs:
	case nir_texop_query_levels:
	case nir_texop_texture_samples:
	case nir_texop_samples_identical:
	case nir_texop_txf_ms_mcs:
		ir3_context_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
		return;
	}

	tex_info(tex, &flags, &coords);

	/*
	 * lay out the first argument in the proper order:
	 *  - actual coordinates first
	 *  - shadow reference
	 *  - array index
	 *  - projection w
	 *  - starting at offset 4, dpdx.xy, dpdy.xy
	 *
	 * bias/lod go into the second arg
	 */
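	/* e.g. for a projected 2d-array shadow sample (sketch), src0 ends
	 * up as { x, y, compare, layer, proj }, assembled by the code below.
	 */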

	/* insert tex coords: */
	for (i = 0; i < coords; i++)
		src0[i] = coord[i];

	nsrc0 = i;

	/* NOTE a3xx (and possibly a4xx?) might be different, using isaml
	 * with scaled x coord according to requested sample:
	 */
	if (tex->op == nir_texop_txf_ms) {
		if (ctx->compiler->txf_ms_with_isaml) {
			/* the samples are laid out in x dimension as
			 *     0 1 2 3
			 * x_ms = (x << ms) + sample_index;
			 */
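			/* e.g. at 4x MSAA, ms = 2, so sample 3 of the texel
			 * at x = 5 is fetched from x_ms = (5 << 2) + 3 = 23.
			 */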
			struct ir3_instruction *ms;
			ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);

			src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
			src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);

			opc = OPC_ISAML;
		} else {
			src0[nsrc0++] = sample_index;
		}
	}

	/* scale up integer coords for TXF based on the LOD */
	if (ctx->compiler->unminify_coords && (opc == OPC_ISAML)) {
		assert(has_lod);
		for (i = 0; i < coords; i++)
			src0[i] = ir3_SHL_B(b, src0[i], 0, lod, 0);
	}

	if (coords == 1) {
		/* hw doesn't do 1d, so we treat it as 2d with
		 * height of 1, and patch up the y coord.
		 * TODO: y coord should be (int)0 in some cases..
		 */
		src0[nsrc0++] = create_immed(b, fui(0.5));
	}

	if (tex->is_shadow && tex->op != nir_texop_lod)
		src0[nsrc0++] = compare;

	if (tex->is_array && tex->op != nir_texop_lod) {
		struct ir3_instruction *idx = coord[coords];

		/* the array coord for cube arrays needs 0.5 added to it */
		if (ctx->compiler->array_index_add_half && (opc != OPC_ISAML))
			idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);

		src0[nsrc0++] = idx;
	}

	if (has_proj) {
		src0[nsrc0++] = proj;
		flags |= IR3_INSTR_P;
	}

	/* pad to 4, then ddx/ddy: */
	if (tex->op == nir_texop_txd) {
		while (nsrc0 < 4)
			src0[nsrc0++] = create_immed(b, fui(0.0));
		for (i = 0; i < coords; i++)
			src0[nsrc0++] = ddx[i];
		if (coords < 2)
			src0[nsrc0++] = create_immed(b, fui(0.0));
		for (i = 0; i < coords; i++)
			src0[nsrc0++] = ddy[i];
		if (coords < 2)
			src0[nsrc0++] = create_immed(b, fui(0.0));
	}

	/*
	 * second argument (if applicable):
	 *  - offsets
	 *  - lod
	 *  - bias
	 */
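	/* e.g. a textureLodOffset() on a 2d sampler (sketch) would give
	 * src1 = { off.x, off.y, lod }, with IR3_INSTR_O set below.
	 */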
	if (has_off | has_lod | has_bias) {
		if (has_off) {
			unsigned off_coords = coords;
			if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
				off_coords--;
			for (i = 0; i < off_coords; i++)
				src1[nsrc1++] = off[i];
			if (off_coords < 2)
				src1[nsrc1++] = create_immed(b, fui(0.0));
			flags |= IR3_INSTR_O;
		}

		if (has_lod | has_bias)
			src1[nsrc1++] = lod;
	}

	switch (tex->dest_type) {
	case nir_type_invalid:
	case nir_type_float:
		type = TYPE_F32;
		break;
	case nir_type_int:
		type = TYPE_S32;
		break;
	case nir_type_uint:
	case nir_type_bool:
		type = TYPE_U32;
		break;
	default:
		unreachable("bad dest_type");
	}

	if (opc == OPC_GETLOD)
		type = TYPE_U32;

	unsigned tex_idx = tex->texture_index;

	ctx->max_texture_index = MAX2(ctx->max_texture_index, tex_idx);

	struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0);
	struct ir3_instruction *col1 = ir3_create_collect(ctx, src1, nsrc1);

	sam = ir3_SAM(b, opc, type, 0b1111, flags,
			tex_idx, tex_idx, col0, col1);

	if ((ctx->astc_srgb & (1 << tex_idx)) && !nir_tex_instr_is_query(tex)) {
		/* only need first 3 components: */
		sam->regs[0]->wrmask = 0x7;
		ir3_split_dest(b, dst, sam, 0, 3);

		/* we need to sample the alpha separately with a non-ASTC
		 * texture state:
		 */
		sam = ir3_SAM(b, opc, type, 0b1000, flags,
				tex_idx, tex_idx, col0, col1);

		array_insert(ctx->ir, ctx->ir->astc_srgb, sam);

		/* fixup .w component: */
		ir3_split_dest(b, &dst[3], sam, 3, 1);
	} else {
		/* normal (non-workaround) case: */
		ir3_split_dest(b, dst, sam, 0, 4);
	}

	/* GETLOD returns results in 4.8 fixed point */
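	/* e.g. a raw getlod result of 0x180 (384) corresponds to
	 * 384 / 256 = 1.5, hence the 1/256 scale factor below.
	 */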
	if (opc == OPC_GETLOD) {
		struct ir3_instruction *factor = create_immed(b, fui(1.0 / 256));

		compile_assert(ctx, tex->dest_type == nir_type_float);
		for (i = 0; i < 2; i++) {
			dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_U32, TYPE_F32), 0,
					factor, 0);
		}
	}

	put_dst(ctx, &tex->dest);
}

static void
emit_tex_query_levels(struct ir3_context *ctx, nir_tex_instr *tex)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction **dst, *sam;

	dst = ir3_get_dst(ctx, &tex->dest, 1);

	sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 0b0100, 0,
			tex->texture_index, tex->texture_index, NULL, NULL);

	/* even though there is only one component, since it ends
	 * up in .z rather than .x, we need a split_dest()
	 */
	ir3_split_dest(b, dst, sam, 0, 3);

	/* The # of levels comes from getinfo.z. We need to add 1 to it, since
	 * the value in TEX_CONST_0 is zero-based.
	 */
	if (ctx->compiler->levels_add_one)
		dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);

	put_dst(ctx, &tex->dest);
}

static void
emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction **dst, *sam;
	struct ir3_instruction *lod;
	unsigned flags, coords;

	tex_info(tex, &flags, &coords);

	/* Actually we want the number of dimensions, not coordinates. This
	 * distinction only matters for cubes.
	 */
	if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
		coords = 2;

	dst = ir3_get_dst(ctx, &tex->dest, 4);

	compile_assert(ctx, tex->num_srcs == 1);
	compile_assert(ctx, tex->src[0].src_type == nir_tex_src_lod);

	lod = ir3_get_src(ctx, &tex->src[0].src)[0];

	sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
			tex->texture_index, tex->texture_index, lod, NULL);

	ir3_split_dest(b, dst, sam, 0, 4);

	/* Array size actually ends up in .w rather than .z. This doesn't
	 * matter for miplevel 0, but for higher mips the value in z is
	 * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
	 * returned, which means that we have to add 1 to it for arrays.
	 */
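	/* e.g. for a 16-layer 2d-array (sketch): at lod 1 the .z value is
	 * minified, while .w still holds the layer count; on gens with
	 * levels_add_one set it arrives zero-based (15), hence the fixup:
	 */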
	if (tex->is_array) {
		if (ctx->compiler->levels_add_one) {
			dst[coords] = ir3_ADD_U(b, dst[3], 0, create_immed(b, 1), 0);
		} else {
			dst[coords] = ir3_MOV(b, dst[3], TYPE_U32);
		}
	}

	put_dst(ctx, &tex->dest);
}

static void
emit_jump(struct ir3_context *ctx, nir_jump_instr *jump)
{
	switch (jump->type) {
	case nir_jump_break:
	case nir_jump_continue:
	case nir_jump_return:
		/* I *think* we can simply ignore this, and use the
		 * successor block link to figure out where we need to
		 * jump for break/continue
		 */
		break;
	default:
		ir3_context_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
		break;
	}
}

static void
emit_instr(struct ir3_context *ctx, nir_instr *instr)
{
	switch (instr->type) {
	case nir_instr_type_alu:
		emit_alu(ctx, nir_instr_as_alu(instr));
		break;
	case nir_instr_type_deref:
		/* ignored, handled as part of the intrinsic they are src to */
		break;
	case nir_instr_type_intrinsic:
		emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
		break;
	case nir_instr_type_load_const:
		emit_load_const(ctx, nir_instr_as_load_const(instr));
		break;
	case nir_instr_type_ssa_undef:
		emit_undef(ctx, nir_instr_as_ssa_undef(instr));
		break;
	case nir_instr_type_tex: {
		nir_tex_instr *tex = nir_instr_as_tex(instr);
		/* a couple of tex instructions get special-cased:
		 */
		switch (tex->op) {
		case nir_texop_txs:
			emit_tex_txs(ctx, tex);
			break;
		case nir_texop_query_levels:
			emit_tex_query_levels(ctx, tex);
			break;
		default:
			emit_tex(ctx, tex);
			break;
		}
		break;
	}
	case nir_instr_type_jump:
		emit_jump(ctx, nir_instr_as_jump(instr));
		break;
	case nir_instr_type_phi:
		/* we have converted phi webs to regs in NIR by now */
		ir3_context_error(ctx, "Unexpected NIR instruction type: %d\n", instr->type);
		break;
	case nir_instr_type_call:
	case nir_instr_type_parallel_copy:
		ir3_context_error(ctx, "Unhandled NIR instruction type: %d\n", instr->type);
		break;
	}
}

static struct ir3_block *
get_block(struct ir3_context *ctx, const nir_block *nblock)
{
	struct ir3_block *block;
	struct hash_entry *hentry;
	unsigned i;

	hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
	if (hentry)
		return hentry->data;

	block = ir3_block_create(ctx->ir);
	block->nblock = nblock;
	_mesa_hash_table_insert(ctx->block_ht, nblock, block);

	block->predecessors_count = nblock->predecessors->entries;
	block->predecessors = ralloc_array_size(block,
		sizeof(block->predecessors[0]), block->predecessors_count);
	i = 0;
	set_foreach(nblock->predecessors, sentry) {
		block->predecessors[i++] = get_block(ctx, sentry->key);
	}

	return block;
}

static void
emit_block(struct ir3_context *ctx, nir_block *nblock)
{
	struct ir3_block *block = get_block(ctx, nblock);

	for (int i = 0; i < ARRAY_SIZE(block->successors); i++) {
		if (nblock->successors[i]) {
			block->successors[i] =
				get_block(ctx, nblock->successors[i]);
		}
	}

	ctx->block = block;
	list_addtail(&block->node, &ctx->ir->block_list);

	/* re-emit addr register in each block if needed: */
	for (int i = 0; i < ARRAY_SIZE(ctx->addr_ht); i++) {
		_mesa_hash_table_destroy(ctx->addr_ht[i], NULL);
		ctx->addr_ht[i] = NULL;
	}

	nir_foreach_instr(instr, nblock) {
		ctx->cur_instr = instr;
		emit_instr(ctx, instr);
		ctx->cur_instr = NULL;
		if (ctx->error)
			return;
	}
}

static void emit_cf_list(struct ir3_context *ctx, struct exec_list *list);

static void
emit_if(struct ir3_context *ctx, nir_if *nif)
{
	struct ir3_instruction *condition = ir3_get_src(ctx, &nif->condition)[0];

	ctx->block->condition =
		ir3_get_predicate(ctx, ir3_b2n(condition->block, condition));

	emit_cf_list(ctx, &nif->then_list);
	emit_cf_list(ctx, &nif->else_list);
}

static void
emit_loop(struct ir3_context *ctx, nir_loop *nloop)
{
	emit_cf_list(ctx, &nloop->body);
}

static void
stack_push(struct ir3_context *ctx)
{
	ctx->stack++;
	ctx->max_stack = MAX2(ctx->max_stack, ctx->stack);
}

static void
stack_pop(struct ir3_context *ctx)
{
	compile_assert(ctx, ctx->stack > 0);
	ctx->stack--;
}

static void
emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
{
	foreach_list_typed(nir_cf_node, node, node, list) {
		switch (node->type) {
		case nir_cf_node_block:
			emit_block(ctx, nir_cf_node_as_block(node));
			break;
		case nir_cf_node_if:
			stack_push(ctx);
			emit_if(ctx, nir_cf_node_as_if(node));
			stack_pop(ctx);
			break;
		case nir_cf_node_loop:
			stack_push(ctx);
			emit_loop(ctx, nir_cf_node_as_loop(node));
			stack_pop(ctx);
			break;
		case nir_cf_node_function:
			ir3_context_error(ctx, "TODO\n");
			break;
		}
	}
}

/* emit stream-out code.  At this point, the current block is the original
 * (nir) end block, and nir ensures that all flow control paths terminate
 * into the end block.  We re-purpose the original end block to generate
 * the 'if (vtxcnt < maxvtxcnt)' condition, then append the conditional
 * block holding stream-out write instructions, followed by the new end
 * block:
 *
 *   blockOrigEnd {
 *      p0.x = (vtxcnt < maxvtxcnt)
 *      // succs: blockStreamOut, blockNewEnd
 *   }
 *   blockStreamOut {
 *      ... stream-out instructions ...
 *      // succs: blockNewEnd
 *   }
 *   blockNewEnd {
 *   }
 */
static void
emit_stream_out(struct ir3_context *ctx)
{
	struct ir3_shader_variant *v = ctx->so;
	struct ir3 *ir = ctx->ir;
	struct ir3_stream_output_info *strmout =
			&ctx->so->shader->stream_output;
	struct ir3_block *orig_end_block, *stream_out_block, *new_end_block;
	struct ir3_instruction *vtxcnt, *maxvtxcnt, *cond;
	struct ir3_instruction *bases[IR3_MAX_SO_BUFFERS];

	/* create vtxcnt input in input block at top of shader,
	 * so that it is seen as live over the entire duration
	 * of the shader:
	 */
	vtxcnt = create_input(ctx, 0);
	add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt);

	maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX);

	/* at this point, we are at the original 'end' block,
	 * re-purpose this block to stream-out condition, then
	 * append stream-out block and new-end block
	 */
	orig_end_block = ctx->block;

	// TODO these blocks need to update predecessors..
	// maybe w/ store_global intrinsic, we could do this
	// stuff in nir->nir pass

	stream_out_block = ir3_block_create(ir);
	list_addtail(&stream_out_block->node, &ir->block_list);

	new_end_block = ir3_block_create(ir);
	list_addtail(&new_end_block->node, &ir->block_list);

	orig_end_block->successors[0] = stream_out_block;
	orig_end_block->successors[1] = new_end_block;
	stream_out_block->successors[0] = new_end_block;

	/* setup 'if (vtxcnt < maxvtxcnt)' condition: */
	cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0);
	cond->regs[0]->num = regid(REG_P0, 0);
	cond->cat2.condition = IR3_COND_LT;

	/* condition goes on previous block to the conditional,
	 * since it is used to pick which of the two successor
	 * paths to take:
	 */
	orig_end_block->condition = cond;

	/* switch to stream_out_block to generate the stream-out
	 * instructions:
	 */
	ctx->block = stream_out_block;

	/* Calculate base addresses based on vtxcnt.  Instructions
	 * generated for bases not used in the following loop will be
	 * stripped out in the backend.
	 */
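	/* e.g. for buffer i with a stride of 4 dwords (sketch), vertex N's
	 * base address works out to tfbo[i] + N * 16 bytes.
	 */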
	for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
		unsigned stride = strmout->stride[i];
		struct ir3_instruction *base, *off;

		base = create_uniform(ctx->block, regid(v->constbase.tfbo, i));

		/* 24-bit should be enough: */
		off = ir3_MUL_U(ctx->block, vtxcnt, 0,
				create_immed(ctx->block, stride * 4), 0);

		bases[i] = ir3_ADD_S(ctx->block, off, 0, base, 0);
	}

	/* Generate the per-output store instructions: */
	for (unsigned i = 0; i < strmout->num_outputs; i++) {
		for (unsigned j = 0; j < strmout->output[i].num_components; j++) {
			unsigned c = j + strmout->output[i].start_component;
			struct ir3_instruction *base, *out, *stg;

			base = bases[strmout->output[i].output_buffer];
			out = ctx->ir->outputs[regid(strmout->output[i].register_index, c)];

			stg = ir3_STG(ctx->block, base, 0, out, 0,
					create_immed(ctx->block, 1), 0);
			stg->cat6.type = TYPE_U32;
			stg->cat6.dst_offset = (strmout->output[i].dst_offset + j) * 4;

			array_insert(ctx->block, ctx->block->keeps, stg);
		}
	}

	/* and finally switch to the new_end_block: */
	ctx->block = new_end_block;
}

static void
emit_function(struct ir3_context *ctx, nir_function_impl *impl)
{
	nir_metadata_require(impl, nir_metadata_block_index);

	compile_assert(ctx, ctx->stack == 0);

	emit_cf_list(ctx, &impl->body);
	emit_block(ctx, impl->end_block);

	compile_assert(ctx, ctx->stack == 0);

	/* at this point, we should have a single empty block,
	 * into which we emit the 'end' instruction.
	 */
	compile_assert(ctx, list_empty(&ctx->block->instr_list));

	/* If stream-out (aka transform-feedback) enabled, emit the
	 * stream-out instructions, followed by a new empty block (into
	 * which the 'end' instruction lands).
	 *
	 * NOTE: it is done in this order, rather than inserting before
	 * we emit end_block, because NIR guarantees that all blocks
	 * flow into end_block, and that end_block has no successors.
	 * So by re-purposing end_block as the first block of stream-
	 * out, we guarantee that all exit paths flow into the stream-
	 * out instructions.
	 */
	if ((ctx->compiler->gpu_id < 500) &&
			(ctx->so->shader->stream_output.num_outputs > 0) &&
			!ctx->so->binning_pass) {
		debug_assert(ctx->so->type == MESA_SHADER_VERTEX);
		emit_stream_out(ctx);
	}

	ir3_END(ctx->block);
}

static struct ir3_instruction *
create_frag_coord(struct ir3_context *ctx, unsigned comp)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *instr;

	if (!ctx->frag_coord) {
		ctx->frag_coord = create_input_compmask(ctx, 0, 0xf);
		/* defer add_sysval_input() until after all inputs created */
	}

	ir3_split_dest(block, &instr, ctx->frag_coord, comp, 1);

	switch (comp) {
	case 0: /* .x */
	case 1: /* .y */
		/* for frag_coord, we get unsigned values.. we need
		 * to subtract (integer) 8 and divide by 16 (right-
		 * shift by 4) then convert to float:
		 *
		 *    sub.s tmp, src, 8
		 *    shr.b tmp, tmp, 4
		 *    mov.u32f32 dst, tmp
		 *
		 */
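		/* e.g. a raw value of 24 becomes (24 - 8) >> 4 = 1,
		 * i.e. 1.0f after the conversion.
		 */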
		instr = ir3_SUB_S(block, instr, 0,
				create_immed(block, 8), 0);
		instr = ir3_SHR_B(block, instr, 0,
				create_immed(block, 4), 0);
		instr = ir3_COV(block, instr, TYPE_U32, TYPE_F32);

		return instr;
	case 2: /* .z */
	case 3: /* .w */
	default:
		/* seems that we can use these as-is: */
		return instr;
	}
}

static void
setup_input(struct ir3_context *ctx, nir_variable *in)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned ncomp = glsl_get_components(in->type);
	unsigned n = in->data.driver_location;
	unsigned slot = in->data.location;

	/* let's pretend things other than vec4 don't exist: */
	ncomp = MAX2(ncomp, 4);

	/* skip unread inputs: we could end up with (for example) unsplit
	 * matrix/etc inputs in the case they are not read, so just silently
	 * skip these.
	 */
	if (ncomp > 4)
		return;

	compile_assert(ctx, ncomp == 4);

	so->inputs[n].slot = slot;
	so->inputs[n].compmask = (1 << ncomp) - 1;
	so->inputs_count = MAX2(so->inputs_count, n + 1);
	so->inputs[n].interpolate = in->data.interpolation;

	if (ctx->so->type == MESA_SHADER_FRAGMENT) {
		for (int i = 0; i < ncomp; i++) {
			struct ir3_instruction *instr = NULL;
			unsigned idx = (n * 4) + i;

			if (slot == VARYING_SLOT_POS) {
				so->inputs[n].bary = false;
				so->frag_coord = true;
				instr = create_frag_coord(ctx, i);
			} else if (slot == VARYING_SLOT_PNTC) {
				/* see for example st_nir_fixup_varying_slots().. this is
				 * maybe a bit mesa/st specific.  But we need things to line
				 * up for this in fdN_program:
				 *    unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0);
				 *    if (emit->sprite_coord_enable & texmask) {
				 *       ...
				 *    }
				 */
				so->inputs[n].slot = VARYING_SLOT_VAR8;
				so->inputs[n].bary = true;
				instr = create_frag_input(ctx, false);
			} else {
				bool use_ldlv = false;

				/* detect the special case for front/back colors where
				 * we need to do flat vs smooth shading depending on
				 * rast state:
				 */
				if (in->data.interpolation == INTERP_MODE_NONE) {
					switch (slot) {
					case VARYING_SLOT_COL0:
					case VARYING_SLOT_COL1:
					case VARYING_SLOT_BFC0:
					case VARYING_SLOT_BFC1:
						so->inputs[n].rasterflat = true;
						break;
					default:
						break;
					}
				}

				if (ctx->compiler->flat_bypass) {
					if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) ||
							(so->inputs[n].rasterflat && ctx->so->key.rasterflat))
						use_ldlv = true;
				}

				so->inputs[n].bary = true;

				instr = create_frag_input(ctx, use_ldlv);
			}

			compile_assert(ctx, idx < ctx->ir->ninputs);

			ctx->ir->inputs[idx] = instr;
		}
	} else if (ctx->so->type == MESA_SHADER_VERTEX) {
		for (int i = 0; i < ncomp; i++) {
			unsigned idx = (n * 4) + i;
			compile_assert(ctx, idx < ctx->ir->ninputs);
			ctx->ir->inputs[idx] = create_input(ctx, idx);
		}
	} else {
		ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
	}

	if (so->inputs[n].bary || (ctx->so->type == MESA_SHADER_VERTEX)) {
		so->total_in += ncomp;
	}
}

static void
setup_output(struct ir3_context *ctx, nir_variable *out)
{
	struct ir3_shader_variant *so = ctx->so;
	unsigned ncomp = glsl_get_components(out->type);
	unsigned n = out->data.driver_location;
	unsigned slot = out->data.location;
	unsigned comp = 0;

	/* let's pretend things other than vec4 don't exist: */
	ncomp = MAX2(ncomp, 4);
	compile_assert(ctx, ncomp == 4);

	if (ctx->so->type == MESA_SHADER_FRAGMENT) {
		switch (slot) {
		case FRAG_RESULT_DEPTH:
			comp = 2;  /* tgsi will write to .z component */
			so->writes_pos = true;
			break;
		case FRAG_RESULT_COLOR:
			so->color0_mrt = 1;
			break;
		default:
			if (slot >= FRAG_RESULT_DATA0)
				break;
			ir3_context_error(ctx, "unknown FS output name: %s\n",
					gl_frag_result_name(slot));
		}
	} else if (ctx->so->type == MESA_SHADER_VERTEX) {
		switch (slot) {
		case VARYING_SLOT_POS:
			so->writes_pos = true;
			break;
		case VARYING_SLOT_PSIZ:
			so->writes_psize = true;
			break;
		case VARYING_SLOT_COL0:
		case VARYING_SLOT_COL1:
		case VARYING_SLOT_BFC0:
		case VARYING_SLOT_BFC1:
		case VARYING_SLOT_FOGC:
		case VARYING_SLOT_CLIP_DIST0:
		case VARYING_SLOT_CLIP_DIST1:
		case VARYING_SLOT_CLIP_VERTEX:
			break;
		default:
			if (slot >= VARYING_SLOT_VAR0)
				break;
			if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
				break;
			ir3_context_error(ctx, "unknown VS output name: %s\n",
					gl_varying_slot_name(slot));
		}
	} else {
		ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
	}

	compile_assert(ctx, n < ARRAY_SIZE(so->outputs));

	so->outputs[n].slot = slot;
	so->outputs[n].regid = regid(n, comp);
	so->outputs_count = MAX2(so->outputs_count, n + 1);

	for (int i = 0; i < ncomp; i++) {
		unsigned idx = (n * 4) + i;
		compile_assert(ctx, idx < ctx->ir->noutputs);
		ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
	}
}

static int
max_drvloc(struct exec_list *vars)
{
	int drvloc = -1;
	nir_foreach_variable(var, vars) {
		drvloc = MAX2(drvloc, (int)var->data.driver_location);
	}
	return drvloc;
}

static const unsigned max_sysvals[] = {
	[MESA_SHADER_FRAGMENT] = 24,  // TODO
	[MESA_SHADER_VERTEX]   = 16,
	[MESA_SHADER_COMPUTE]  = 16,  // TODO how many do we actually need?
};

static void
emit_instructions(struct ir3_context *ctx)
{
	unsigned ninputs, noutputs;
	nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);

	ninputs  = (max_drvloc(&ctx->s->inputs) + 1) * 4;
	noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4;

	/* we need to leave room for sysvals: */
	ninputs += max_sysvals[ctx->so->type];

	ctx->ir = ir3_create(ctx->compiler, ninputs, noutputs);

	/* Create inputs in first block: */
	ctx->block = get_block(ctx, nir_start_block(fxn));
	ctx->in_block = ctx->block;
	list_addtail(&ctx->block->node, &ctx->ir->block_list);

	ninputs -= max_sysvals[ctx->so->type];

	/* for fragment shader, the vcoord input register is used as the
	 * base for bary.f varying fetch instrs:
	 */
	struct ir3_instruction *vcoord = NULL;
	if (ctx->so->type == MESA_SHADER_FRAGMENT) {
		struct ir3_instruction *xy[2];

		vcoord = create_input_compmask(ctx, 0, 0x3);
		ir3_split_dest(ctx->block, xy, vcoord, 0, 2);

		ctx->frag_vcoord = ir3_create_collect(ctx, xy, 2);
	}

	/* Setup inputs: */
	nir_foreach_variable(var, &ctx->s->inputs) {
		setup_input(ctx, var);
	}

	/* Defer add_sysval_input() stuff until after the setup_input() loop,
	 * because sysvals need to be appended after varyings:
	 */
	if (vcoord) {
		add_sysval_input_compmask(ctx, SYSTEM_VALUE_VARYING_COORD,
				0x3, vcoord);
	}

	if (ctx->frag_coord) {
		add_sysval_input_compmask(ctx, SYSTEM_VALUE_FRAG_COORD,
				0xf, ctx->frag_coord);
	}

	/* Setup outputs: */
	nir_foreach_variable(var, &ctx->s->outputs) {
		setup_output(ctx, var);
	}

	/* Setup registers (which should only be arrays): */
	nir_foreach_register(reg, &ctx->s->registers) {
		ir3_declare_array(ctx, reg);
	}

	/* NOTE: need to do something more clever when we support >1 fxn */
	nir_foreach_register(reg, &fxn->registers) {
		ir3_declare_array(ctx, reg);
	}
	/* And emit the body: */
	ctx->impl = fxn;
	emit_function(ctx, fxn);
}

/* from NIR perspective, we actually have varying inputs.  But the varying
 * inputs, from an IR standpoint, are just bary.f/ldlv instructions.  The
 * only actual inputs are the sysvals.
 */
static void
fixup_frag_inputs(struct ir3_context *ctx)
{
	struct ir3_shader_variant *so = ctx->so;
	struct ir3 *ir = ctx->ir;
	unsigned i = 0;

	/* sysvals should appear at the end of the inputs, drop everything else: */
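	/* e.g. (sketch) with four varyings followed by two sysvals,
	 * inputs_count is 6 and the loop below stops at i = 4, so we skip
	 * the first 4*4 = 16 scalar input slots.
	 */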
	while ((i < so->inputs_count) && !so->inputs[i].sysval)
		i++;

	/* at IR level, inputs are always blocks of 4 scalars: */
	i *= 4;

	ir->inputs = &ir->inputs[i];
	ir->ninputs -= i;
}

/* Fixup tex sampler state for astc/srgb workaround instructions.  We
 * need to assign the tex state indexes for these after we know the
 * max tex index.
 */
static void
fixup_astc_srgb(struct ir3_context *ctx)
{
	struct ir3_shader_variant *so = ctx->so;
	/* indexed by original tex idx, value is newly assigned alpha sampler
	 * state tex idx.  Zero is invalid since there is at least one sampler
	 * if we get here.
	 */
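	/* e.g. (sketch) with max_texture_index == 3 and ASTC/sRGB textures
	 * at idx 1 and 2, the alpha tex states get assigned slots 4 and 5.
	 */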
	unsigned alt_tex_state[16] = {0};
	unsigned tex_idx = ctx->max_texture_index + 1;
	unsigned idx = 0;

	so->astc_srgb.base = tex_idx;

	for (unsigned i = 0; i < ctx->ir->astc_srgb_count; i++) {
		struct ir3_instruction *sam = ctx->ir->astc_srgb[i];

		compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));

		if (alt_tex_state[sam->cat5.tex] == 0) {
			/* assign new alternate/alpha tex state slot: */
			alt_tex_state[sam->cat5.tex] = tex_idx++;
			so->astc_srgb.orig_idx[idx++] = sam->cat5.tex;
			so->astc_srgb.count++;
		}

		sam->cat5.tex = alt_tex_state[sam->cat5.tex];
	}
}

static void
fixup_binning_pass(struct ir3_context *ctx)
{
	struct ir3_shader_variant *so = ctx->so;
	struct ir3 *ir = ctx->ir;
	unsigned i, j;

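	/* e.g. (sketch) outputs { POS, COL0, PSIZ } compact down to
	 * { POS, PSIZ }, outputs_count going 3 -> 2 and noutputs 12 -> 8.
	 */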
	for (i = 0, j = 0; i < so->outputs_count; i++) {
		unsigned slot = so->outputs[i].slot;

		/* throw away everything but first position/psize */
		if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
			if (i != j) {
				so->outputs[j] = so->outputs[i];
				ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0];
				ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1];
				ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2];
				ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3];
			}
			j++;
		}
	}
	so->outputs_count = j;
	ir->noutputs = j * 4;
}

int
ir3_compile_shader_nir(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so)
{
	struct ir3_context *ctx;
	struct ir3 *ir;
	struct ir3_instruction **inputs;
	unsigned i, actual_in, inloc;
	int ret = 0, max_bary;

	assert(!so->ir);

	ctx = ir3_context_init(compiler, so);
	if (!ctx) {
		DBG("INIT failed!");
		ret = -1;
		goto out;
	}

	emit_instructions(ctx);

	if (ctx->error) {
		DBG("EMIT failed!");
		ret = -1;
		goto out;
	}

	ir = so->ir = ctx->ir;

	/* keep track of the inputs from the TGSI perspective.. */
	inputs = ir->inputs;

	/* but fixup actual inputs for frag shader: */
	if (so->type == MESA_SHADER_FRAGMENT)
		fixup_frag_inputs(ctx);

	/* at this point, for binning pass, throw away unneeded outputs: */
	if (so->binning_pass && (ctx->compiler->gpu_id < 600))
		fixup_binning_pass(ctx);

	/* if we want half-precision outputs, mark the output registers
	 * as half:
	 */
	if (so->key.half_precision) {
		for (i = 0; i < ir->noutputs; i++) {
			struct ir3_instruction *out = ir->outputs[i];

			if (!out)
				continue;

			/* if frag shader writes z, that needs to be full precision: */
			if (so->outputs[i/4].slot == FRAG_RESULT_DEPTH)
				continue;

			out->regs[0]->flags |= IR3_REG_HALF;
			/* output could be a fanout (ie. texture fetch output)
			 * in which case we need to propagate the half-reg flag
			 * up to the definer so that RA sees it:
			 */
			if (out->opc == OPC_META_FO) {
				out = out->regs[1]->instr;
				out->regs[0]->flags |= IR3_REG_HALF;
			}

			if (out->opc == OPC_MOV) {
				out->cat1.dst_type = half_type(out->cat1.dst_type);
			}
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("BEFORE CP:\n");
		ir3_print(ir);
	}

	ir3_cp(ir, so);

	/* at this point, for binning pass, throw away unneeded outputs:
	 * Note that for a6xx and later, we do this after ir3_cp to ensure
	 * that the uniform/constant layout for BS and VS matches, so that
	 * we can re-use the same VS_CONST state group.
	 */
	if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
		fixup_binning_pass(ctx);

	/* Insert a mov if the same instruction is used for multiple outputs,
	 * eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
	 */
	for (int i = ir->noutputs - 1; i >= 0; i--) {
		if (!ir->outputs[i])
			continue;
		for (unsigned j = 0; j < i; j++) {
			if (ir->outputs[i] == ir->outputs[j]) {
				ir->outputs[i] =
					ir3_MOV(ir->outputs[i]->block, ir->outputs[i], TYPE_F32);
			}
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("BEFORE GROUPING:\n");
		ir3_print(ir);
	}

	ir3_sched_add_deps(ir);

	/* Group left/right neighbors, inserting mov's where needed to
	 * solve conflicts:
	 */
	ir3_group(ir);

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER GROUPING:\n");
		ir3_print(ir);
	}

	ir3_depth(ir);

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER DEPTH:\n");
		ir3_print(ir);
	}

	ret = ir3_sched(ir);
	if (ret) {
		DBG("SCHED failed!");
		goto out;
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER SCHED:\n");
		ir3_print(ir);
	}

	ret = ir3_ra(ir, so->type, so->frag_coord, so->frag_face);
	if (ret) {
		DBG("RA failed!");
		goto out;
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER RA:\n");
		ir3_print(ir);
	}

	/* fixup inputs/outputs: */
	for (i = 0; i < so->outputs_count; i++) {
		so->outputs[i].regid = ir->outputs[i*4]->regs[0]->num;
	}

	/* Note that some or all channels of an input may be unused: */
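	/* e.g. (sketch) if only .xy of a vec4 varying are actually read,
	 * compmask ends up 0x3 and only two inloc slots are consumed.
	 */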
	actual_in = 0;
	inloc = 0;
	for (i = 0; i < so->inputs_count; i++) {
		unsigned j, reg = regid(63,0), compmask = 0, maxcomp = 0;
		so->inputs[i].ncomp = 0;
		so->inputs[i].inloc = inloc;
		for (j = 0; j < 4; j++) {
			struct ir3_instruction *in = inputs[(i*4) + j];
			if (in && !(in->flags & IR3_INSTR_UNUSED)) {
				compmask |= (1 << j);
				reg = in->regs[0]->num - j;
				actual_in++;
				so->inputs[i].ncomp++;
				if ((so->type == MESA_SHADER_FRAGMENT) && so->inputs[i].bary) {
					/* assign inloc: */
					assert(in->regs[1]->flags & IR3_REG_IMMED);
					in->regs[1]->iim_val = inloc + j;
					maxcomp = j + 1;
				}
			}
		}
		if ((so->type == MESA_SHADER_FRAGMENT) && compmask && so->inputs[i].bary) {
			so->varying_in++;
			so->inputs[i].compmask = (1 << maxcomp) - 1;
			inloc += maxcomp;
		} else if (!so->inputs[i].sysval) {
			so->inputs[i].compmask = compmask;
		}
		so->inputs[i].regid = reg;
	}

	if (ctx->astc_srgb)
		fixup_astc_srgb(ctx);

	/* We need to run legalize after the (frag shader's) bary.f
	 * offsets (inloc) have been assigned.
	 */
	ir3_legalize(ir, &so->num_samp, &so->has_ssbo, &max_bary);

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER LEGALIZE:\n");
		ir3_print(ir);
	}

	so->branchstack = ctx->max_stack;

	/* Note that actual_in counts inputs that are not bary.f'd for FS: */
	if (so->type == MESA_SHADER_VERTEX)
		so->total_in = actual_in;
	else
		so->total_in = max_bary + 1;

out:
	if (ret) {
		if (so->ir)
			ir3_destroy(so->ir);
		so->ir = NULL;
	}
	ir3_context_free(ctx);

	return ret;
}