r600: Fix interpolateAtCentroid
mesa.git: src/gallium/drivers/r600/sb/sb_bc_parser.cpp
/*
 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Vadim Girlin
 */

#define BCP_DEBUG 0

#if BCP_DEBUG
#define BCP_DUMP(q) do { q } while (0)
#else
#define BCP_DUMP(q)
#endif

#include "r600_pipe.h"
#include "r600_shader.h"
#include "eg_sq.h" // CM_V_SQ_MOVA_DST_CF_IDX0/1

#include <stack>

#include "sb_bc.h"
#include "sb_shader.h"
#include "sb_pass.h"
#include "util/macros.h"

namespace r600_sb {

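// First pass over the hardware bytecode: determines the shader target,
// creates the shader object, and builds the CF node list plus the raw
// ALU/fetch clause contents via bc_decoder. IR values are not constructed
// here; that happens in prepare().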
int bc_parser::decode() {

    dw = bc->bytecode;
    bc_ndw = bc->ndw;
    max_cf = 0;

    dec = new bc_decoder(ctx, dw, bc_ndw);

    shader_target t = TARGET_UNKNOWN;

    if (pshader) {
        switch (bc->type) {
        case PIPE_SHADER_FRAGMENT: t = TARGET_PS; break;
        case PIPE_SHADER_VERTEX:
            t = pshader->vs_as_ls ? TARGET_LS : (pshader->vs_as_es ? TARGET_ES : TARGET_VS);
            break;
        case PIPE_SHADER_GEOMETRY: t = TARGET_GS; break;
        case PIPE_SHADER_COMPUTE: t = TARGET_COMPUTE; break;
        case PIPE_SHADER_TESS_CTRL: t = TARGET_HS; break;
        case PIPE_SHADER_TESS_EVAL: t = pshader->tes_as_es ? TARGET_ES : TARGET_VS; break;
        default: assert(!"unknown shader target"); return -1; break;
        }
    } else {
        if (bc->type == PIPE_SHADER_COMPUTE)
            t = TARGET_COMPUTE;
        else
            t = TARGET_FETCH;
    }

    sh = new shader(ctx, t, bc->debug_id);
    sh->safe_math = sb_context::safe_math || (t == TARGET_COMPUTE || bc->precise);

    int r = decode_shader();

    delete dec;

    sh->ngpr = bc->ngpr;
    sh->nstack = bc->nstack;

    return r;
}

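// Decode CF instructions sequentially, starting at dword 0. Decoding
// continues past an end-of-program marker as long as some branch target
// (tracked in max_cf) still lies beyond the current CF index.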
int bc_parser::decode_shader() {
    int r = 0;
    unsigned i = 0;
    bool eop = false;

    sh->init();

    do {
        eop = false;
        if ((r = decode_cf(i, eop)))
            return r;

    } while (!eop || (i >> 1) < max_cf);

    return 0;
}

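// Second pass: turn the decoded nodes into SB IR - first the input and
// GPR-array declarations, then values and structured control flow.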
int bc_parser::prepare() {
    int r = 0;
    if ((r = parse_decls()))
        return r;
    if ((r = prepare_ir()))
        return r;
    return 0;
}

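// Declare shader inputs and GPR arrays. For evergreen+ pixel shaders the
// ij interpolator GPRs preloaded by the hardware are added as live inputs;
// an input read via interpolateAtCentroid additionally marks the centroid
// interpolator for its mode as used.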
int bc_parser::parse_decls() {

    if (!pshader) {
        if (gpr_reladdr)
            sh->add_gpr_array(0, bc->ngpr, 0x0F);

        // compute shaders have some values preloaded in R0, R1
        sh->add_input(0 /* GPR */, true /* preloaded */, 0x0F /* mask */);
        sh->add_input(1 /* GPR */, true /* preloaded */, 0x0F /* mask */);
        return 0;
    }

    if (pshader->indirect_files & ~((1 << TGSI_FILE_CONSTANT) | (1 << TGSI_FILE_SAMPLER))) {

        assert(pshader->num_arrays);

        if (pshader->num_arrays) {
            for (unsigned i = 0; i < pshader->num_arrays; ++i) {
                r600_shader_array &a = pshader->arrays[i];
                sh->add_gpr_array(a.gpr_start, a.gpr_count, a.comp_mask);
            }
        } else {
            sh->add_gpr_array(0, pshader->bc.ngpr, 0x0F);
        }
    }

    // GS inputs can add indirect addressing
    if (sh->target == TARGET_GS) {
        if (pshader->num_arrays) {
            for (unsigned i = 0; i < pshader->num_arrays; ++i) {
                r600_shader_array &a = pshader->arrays[i];
                sh->add_gpr_array(a.gpr_start, a.gpr_count, a.comp_mask);
            }
        }
    }

    if (sh->target == TARGET_VS || sh->target == TARGET_ES || sh->target == TARGET_HS || sh->target == TARGET_LS)
        sh->add_input(0, 1, 0x0F);
    else if (sh->target == TARGET_GS) {
        sh->add_input(0, 1, 0x0F);
        sh->add_input(1, 1, 0x0F);
    } else if (sh->target == TARGET_COMPUTE) {
        sh->add_input(0, 1, 0x0F);
        sh->add_input(1, 1, 0x0F);
    }

    bool ps_interp = ctx.hw_class >= HW_CLASS_EVERGREEN
            && sh->target == TARGET_PS;

    bool ij_interpolators[6];
    memset(ij_interpolators, 0, sizeof(ij_interpolators));

    for (unsigned i = 0; i < pshader->ninput; ++i) {
        r600_shader_io & in = pshader->input[i];
        bool preloaded = sh->target == TARGET_PS && !(ps_interp && in.spi_sid);
        sh->add_input(in.gpr, preloaded, /*in.write_mask*/ 0x0F);
        if (ps_interp && in.spi_sid) {
            int k = eg_get_interpolator_index(in.interpolate, in.interpolate_location);
            if (k >= 0) {
                ij_interpolators[k] |= true;
                if (in.uses_interpolate_at_centroid) {
                    k = eg_get_interpolator_index(in.interpolate, TGSI_INTERPOLATE_LOC_CENTROID);
                    ij_interpolators[k] |= true;
                }
            }
        }
    }

    if (ps_interp) {
        /* add the egcm ij interpolators to live inputs */
        unsigned num_ij = 0;
        for (unsigned i = 0; i < ARRAY_SIZE(ij_interpolators); i++) {
            num_ij += ij_interpolators[i];
        }

        unsigned mask = (1 << (2 * num_ij)) - 1;
        unsigned gpr = 0;

        while (mask) {
            sh->add_input(gpr, true, mask & 0x0F);
            ++gpr;
            mask >>= 4;
        }
    }

    return 0;
}

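// Decode one CF instruction at dword offset i (CF index i >> 1), record it
// in cf_map for later branch-target lookups, and decode any ALU or fetch
// clause it points to.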
int bc_parser::decode_cf(unsigned &i, bool &eop) {

    int r;

    cf_node *cf = sh->create_cf();
    sh->root->push_back(cf);

    unsigned id = i >> 1;

    cf->bc.id = id;

    if (cf_map.size() < id + 1)
        cf_map.resize(id + 1);

    cf_map[id] = cf;

    if ((r = dec->decode_cf(i, cf->bc)))
        return r;

    cf_op_flags flags = (cf_op_flags)cf->bc.op_ptr->flags;

    if (flags & CF_ALU) {
        if ((r = decode_alu_clause(cf)))
            return r;
    } else if (flags & CF_FETCH) {
        if ((r = decode_fetch_clause(cf)))
            return r;
    } else if (flags & CF_EXP) {
        if (cf->bc.rw_rel)
            gpr_reladdr = true;
        assert(!cf->bc.rw_rel);
    } else if (flags & CF_MEM) {
        if (cf->bc.rw_rel)
            gpr_reladdr = true;
        assert(!cf->bc.rw_rel);
    } else if (flags & CF_BRANCH) {
        if (cf->bc.addr > max_cf)
            max_cf = cf->bc.addr;
    }

    eop = cf->bc.end_of_program || cf->bc.op == CF_OP_CF_END ||
            cf->bc.op == CF_OP_RET;
    return 0;
}

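// Decode the ALU groups of an ALU clause; cf->bc.count + 1 is the clause
// size in 64-bit slots, including the literal constants.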
int bc_parser::decode_alu_clause(cf_node* cf) {
    unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1, gcnt;
    int r;

    cf->subtype = NST_ALU_CLAUSE;

    cgroup = 0;
    memset(slots[0], 0, 5*sizeof(slots[0][0]));

    unsigned ng = 0;

    do {
        if ((r = decode_alu_group(cf, i, gcnt)))
            return r;
        assert(gcnt <= cnt);
        cnt -= gcnt;
        ng++;
    } while (cnt);

    return 0;
}

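// Decode a single ALU group: up to five instruction slots terminated by
// the 'last' bit, followed by the literal constants referenced by the
// group. gcnt returns the number of 64-bit slots consumed.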
int bc_parser::decode_alu_group(cf_node* cf, unsigned &i, unsigned &gcnt) {
    int r;
    alu_node *n;
    alu_group_node *g = sh->create_alu_group();

    cgroup = !cgroup;
    memset(slots[cgroup], 0, 5*sizeof(slots[0][0]));
    gcnt = 0;

    unsigned literal_mask = 0;

    do {
        n = sh->create_alu();
        g->push_back(n);

        if ((r = dec->decode_alu(i, n->bc)))
            return r;

        if (!sh->assign_slot(n, slots[cgroup])) {
            assert(!"alu slot assignment failed");
            return -1;
        }

        gcnt++;

    } while (gcnt <= 5 && !n->bc.last);

    assert(n->bc.last);

    for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
        n = static_cast<alu_node*>(*I);

        if (n->bc.dst_rel)
            gpr_reladdr = true;

        for (int k = 0; k < n->bc.op_ptr->src_count; ++k) {
            bc_alu_src &src = n->bc.src[k];
            if (src.rel)
                gpr_reladdr = true;
            if (src.sel == ALU_SRC_LITERAL) {
                literal_mask |= (1 << src.chan);
                src.value.u = dw[i + src.chan];
            }
        }
    }

    unsigned literal_ndw = 0;
    while (literal_mask) {
        g->literals.push_back(dw[i + literal_ndw]);
        literal_ndw += 1;
        literal_mask >>= 1;
    }

    literal_ndw = (literal_ndw + 1) & ~1u;

    i += literal_ndw;
    gcnt += literal_ndw >> 1;

    cf->push_back(g);
    return 0;
}

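// Build IR operands for every ALU group of a clause.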
int bc_parser::prepare_alu_clause(cf_node* cf) {
    int r;

    // loop over alu groups
    for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {
        assert(I->subtype == NST_ALU_GROUP);
        alu_group_node *g = static_cast<alu_group_node*>(*I);
        if ((r = prepare_alu_group(cf, g)))
            return r;
    }

    return 0;
}

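// Track the last MOVA and the values written to the CF_IDX0/1 index
// registers, so that indexed kcache/resource accesses can take them as
// explicit source dependencies.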
void bc_parser::save_set_cf_index(value *val, unsigned idx)
{
    assert(idx <= 1);
    assert(val);
    cf_index_value[idx] = val;
}

value *bc_parser::get_cf_index_value(unsigned idx)
{
    assert(idx <= 1);
    assert(cf_index_value[idx]);
    return cf_index_value[idx];
}

void bc_parser::save_mova(alu_node *mova)
{
    assert(mova);
    this->mova = mova;
}

alu_node *bc_parser::get_mova()
{
    assert(mova);
    return mova;
}

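// Build IR destinations and sources for one ALU group: special values for
// LDS queues, predicates and the exec mask, literal/PV-PS/kcache/GPR
// sources, and finally packing of multislot instructions into an
// alu_packed_node.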
int bc_parser::prepare_alu_group(cf_node* cf, alu_group_node *g) {

    alu_node *n;

    cgroup = !cgroup;
    memset(slots[cgroup], 0, 5*sizeof(slots[0][0]));

    for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
        n = static_cast<alu_node*>(*I);
        bool ubo_indexing[2] = {};

        if (!sh->assign_slot(n, slots[cgroup])) {
            assert(!"alu slot assignment failed");
            return -1;
        }

        unsigned src_count = n->bc.op_ptr->src_count;

        if (ctx.alu_slots(n->bc.op) & AF_4SLOT)
            n->flags |= NF_ALU_4SLOT;

        n->src.resize(src_count);

        unsigned flags = n->bc.op_ptr->flags;

        if (flags & AF_LDS) {
            bool need_rw = false, need_oqa = false, need_oqb = false;
            int ndst = 0, ncount = 0;

            /* all non-read operations have side effects */
            if (n->bc.op != LDS_OP2_LDS_READ2_RET &&
                    n->bc.op != LDS_OP1_LDS_READ_REL_RET &&
                    n->bc.op != LDS_OP1_LDS_READ_RET) {
                n->flags |= NF_DONT_KILL;
                ndst++;
                need_rw = true;
            }

            if (n->bc.op >= LDS_OP2_LDS_ADD_RET && n->bc.op <= LDS_OP1_LDS_USHORT_READ_RET) {
                need_oqa = true;
                ndst++;
            }

            if (n->bc.op == LDS_OP2_LDS_READ2_RET || n->bc.op == LDS_OP1_LDS_READ_REL_RET) {
                need_oqb = true;
                ndst++;
            }

            n->dst.resize(ndst);
            if (need_oqa)
                n->dst[ncount++] = sh->get_special_value(SV_LDS_OQA);
            if (need_oqb)
                n->dst[ncount++] = sh->get_special_value(SV_LDS_OQB);
            if (need_rw)
                n->dst[ncount++] = sh->get_special_value(SV_LDS_RW);

            n->flags |= NF_DONT_MOVE | NF_DONT_HOIST;

        } else if (flags & AF_PRED) {
            n->dst.resize(3);
            if (n->bc.update_pred)
                n->dst[1] = sh->get_special_value(SV_ALU_PRED);
            if (n->bc.update_exec_mask)
                n->dst[2] = sh->get_special_value(SV_EXEC_MASK);

            n->flags |= NF_DONT_HOIST;

        } else if (flags & AF_KILL) {

            n->dst.resize(2);
            n->dst[1] = sh->get_special_value(SV_VALID_MASK);
            sh->set_uses_kill();

            n->flags |= NF_DONT_HOIST | NF_DONT_MOVE |
                    NF_DONT_KILL | NF_SCHEDULE_EARLY;

        } else {
            n->dst.resize(1);
        }

        if (n->bc.op == ALU_OP0_SET_CF_IDX0 || n->bc.op == ALU_OP0_SET_CF_IDX1) {
            // Move CF_IDX value into tex instruction operands, scheduler
            // will later re-emit setting of CF_IDX; DCE will kill this op
            save_set_cf_index(get_mova()->src[0], n->bc.op == ALU_OP0_SET_CF_IDX1);
        } else if (flags & AF_MOVA) {

            n->dst[0] = sh->get_special_value(SV_AR_INDEX);
            save_mova(n);

            n->flags |= NF_DONT_HOIST;

        } else if ((n->bc.op_ptr->src_count == 3 || n->bc.write_mask) && !(flags & AF_LDS)) {
            assert(!n->bc.dst_rel || n->bc.index_mode == INDEX_AR_X);

            value *v = sh->get_gpr_value(false, n->bc.dst_gpr, n->bc.dst_chan,
                    n->bc.dst_rel);

            n->dst[0] = v;
        }

        if (n->bc.pred_sel) {
            sh->has_alu_predication = true;
            n->pred = sh->get_special_value(SV_ALU_PRED);
        }

        for (unsigned s = 0; s < src_count; ++s) {
            bc_alu_src &src = n->bc.src[s];

            if (src.sel == ALU_SRC_LITERAL) {
                n->src[s] = sh->get_const_value(src.value);
            } else if (src.sel == ALU_SRC_PS || src.sel == ALU_SRC_PV) {
                unsigned pgroup = !cgroup, prev_slot = src.sel == ALU_SRC_PS ?
                        SLOT_TRANS : src.chan;

                // XXX shouldn't happen but llvm backend uses PS on cayman
                if (prev_slot == SLOT_TRANS && ctx.is_cayman())
                    prev_slot = SLOT_X;

                alu_node *prev_alu = slots[pgroup][prev_slot];

                assert(prev_alu);

                if (!prev_alu->dst[0]) {
                    value * t = sh->create_temp_value();
                    prev_alu->dst[0] = t;
                }

                value *d = prev_alu->dst[0];

                if (d->is_rel()) {
                    d = sh->get_gpr_value(true, prev_alu->bc.dst_gpr,
                            prev_alu->bc.dst_chan,
                            prev_alu->bc.dst_rel);
                }

                n->src[s] = d;
            } else if (ctx.is_kcache_sel(src.sel)) {
                unsigned sel = src.sel, kc_addr;
                unsigned kc_set = ((sel >> 7) & 2) + ((sel >> 5) & 1);

                bc_kcache &kc = cf->bc.kc[kc_set];
                kc_addr = (kc.addr << 4) + (sel & 0x1F);
                n->src[s] = sh->get_kcache_value(kc.bank, kc_addr, src.chan, (alu_kcache_index_mode)kc.index_mode);

                if (kc.index_mode != KC_INDEX_NONE) {
                    assert(kc.index_mode != KC_LOCK_LOOP);
                    ubo_indexing[kc.index_mode - KC_INDEX_0] = true;
                }
            } else if (src.sel < MAX_GPR) {
                value *v = sh->get_gpr_value(true, src.sel, src.chan, src.rel);

                n->src[s] = v;

            } else if (src.sel >= ALU_SRC_PARAM_OFFSET) {
                // using slot for value channel because in fact the slot
                // determines the channel that is loaded by INTERP_LOAD_P0
                // (and maybe some others).
                // otherwise GVN will consider INTERP_LOAD_P0s with the same
                // param index as equal instructions and leave only one of them
                n->src[s] = sh->get_special_ro_value(sel_chan(src.sel,
                        n->bc.slot));
            } else if (ctx.is_lds_oq(src.sel)) {
                switch (src.sel) {
                case ALU_SRC_LDS_OQ_A:
                case ALU_SRC_LDS_OQ_B:
                    assert(!"Unsupported LDS queue access in SB");
                    break;
                case ALU_SRC_LDS_OQ_A_POP:
                    n->src[s] = sh->get_special_value(SV_LDS_OQA);
                    break;
                case ALU_SRC_LDS_OQ_B_POP:
                    n->src[s] = sh->get_special_value(SV_LDS_OQB);
                    break;
                }
                n->flags |= NF_DONT_HOIST | NF_DONT_MOVE;

            } else {
                switch (src.sel) {
                case ALU_SRC_0:
                    n->src[s] = sh->get_const_value(0);
                    break;
                case ALU_SRC_0_5:
                    n->src[s] = sh->get_const_value(0.5f);
                    break;
                case ALU_SRC_1:
                    n->src[s] = sh->get_const_value(1.0f);
                    break;
                case ALU_SRC_1_INT:
                    n->src[s] = sh->get_const_value(1);
                    break;
                case ALU_SRC_M_1_INT:
                    n->src[s] = sh->get_const_value(-1);
                    break;
                default:
                    n->src[s] = sh->get_special_ro_value(src.sel);
                    break;
                }
            }
        }

        // add UBO index values if any as dependencies
        if (ubo_indexing[0]) {
            n->src.push_back(get_cf_index_value(0));
        }
        if (ubo_indexing[1]) {
            n->src.push_back(get_cf_index_value(1));
        }

        if ((flags & AF_MOVA) && (n->bc.dst_gpr == CM_V_SQ_MOVA_DST_CF_IDX0 || n->bc.dst_gpr == CM_V_SQ_MOVA_DST_CF_IDX1) &&
                ctx.is_cayman())
            // Move CF_IDX value into tex instruction operands, scheduler
            // will later re-emit setting of CF_IDX
            save_set_cf_index(n->src[0], n->bc.dst_gpr == CM_V_SQ_MOVA_DST_CF_IDX1);
    }

    // pack multislot instructions into alu_packed_node

    alu_packed_node *p = NULL;
    for (node_iterator N, I = g->begin(), E = g->end(); I != E; I = N) {
        N = I + 1;
        alu_node *a = static_cast<alu_node*>(*I);
        unsigned sflags = a->bc.slot_flags;

        if (sflags == AF_4V || (ctx.is_cayman() && sflags == AF_S)) {
            if (!p)
                p = sh->create_alu_packed();

            a->remove();
            p->push_back(a);
        }
    }

    if (p) {
        g->push_front(p);

        if (p->count() == 3 && ctx.is_cayman()) {
            // cayman's scalar instruction that can use 3 or 4 slots

            // FIXME for simplicity we'll always add 4th slot,
            // but probably we might want to always remove 4th slot and make
            // sure that regalloc won't choose 'w' component for dst

            alu_node *f = static_cast<alu_node*>(p->first);
            alu_node *a = sh->create_alu();
            a->src = f->src;
            a->dst.resize(f->dst.size());
            a->bc = f->bc;
            a->bc.slot = SLOT_W;
            p->push_back(a);
        }
    }

    return 0;
}

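// Decode a TEX/VTX/GDS clause into fetch nodes.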
int bc_parser::decode_fetch_clause(cf_node* cf) {
    int r;
    unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1;

    if (cf->bc.op_ptr->flags & FF_GDS)
        cf->subtype = NST_GDS_CLAUSE;
    else
        cf->subtype = NST_TEX_CLAUSE;

    while (cnt--) {
        fetch_node *n = sh->create_fetch();
        cf->push_back(n);
        if ((r = dec->decode_fetch(i, n->bc)))
            return r;
        if (n->bc.src_rel || n->bc.dst_rel)
            gpr_reladdr = true;
    }
    return 0;
}

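// Build IR operands for a fetch clause. Values set by SET_GRADIENTS_* /
// SET_TEXTURE_OFFSETS are captured here and folded into the consuming
// instruction's source list; bc_finalizer re-emits the SET_* ops later.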
int bc_parser::prepare_fetch_clause(cf_node *cf) {

    vvec grad_v, grad_h, texture_offsets;

    for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {

        fetch_node *n = static_cast<fetch_node*>(*I);
        assert(n->is_valid());

        unsigned flags = n->bc.op_ptr->flags;

        unsigned vtx = flags & FF_VTX;
        unsigned gds = flags & FF_GDS;
        unsigned num_src = gds ? 2 : vtx ? ctx.vtx_src_num : 4;

        n->dst.resize(4);

        if (gds) {
            n->flags |= NF_DONT_HOIST | NF_DONT_MOVE | NF_DONT_KILL;
        }
        if (flags & (FF_SETGRAD | FF_USEGRAD | FF_GETGRAD)) {
            sh->uses_gradients = true;
        }

        if (flags & (FF_SETGRAD | FF_SET_TEXTURE_OFFSETS)) {

            vvec *grad = NULL;

            switch (n->bc.op) {
            case FETCH_OP_SET_GRADIENTS_V:
                grad = &grad_v;
                break;
            case FETCH_OP_SET_GRADIENTS_H:
                grad = &grad_h;
                break;
            case FETCH_OP_SET_TEXTURE_OFFSETS:
                grad = &texture_offsets;
                break;
            default:
                assert(!"unexpected SET_GRAD instruction");
                return -1;
            }

            if (grad->empty())
                grad->resize(4);

            for (unsigned s = 0; s < 4; ++s) {
                unsigned sw = n->bc.src_sel[s];
                if (sw <= SEL_W)
                    (*grad)[s] = sh->get_gpr_value(true, n->bc.src_gpr,
                            sw, false);
                else if (sw == SEL_0)
                    (*grad)[s] = sh->get_const_value(0.0f);
                else if (sw == SEL_1)
                    (*grad)[s] = sh->get_const_value(1.0f);
            }
        } else {
            // Fold source values for instructions with hidden target values
            // into the instructions using them. The set instructions are
            // later re-emitted by bc_finalizer
            if (flags & FF_USEGRAD) {
                n->src.resize(12);
                std::copy(grad_v.begin(), grad_v.end(), n->src.begin() + 4);
                std::copy(grad_h.begin(), grad_h.end(), n->src.begin() + 8);
            } else if (flags & FF_USE_TEXTURE_OFFSETS) {
                n->src.resize(8);
                std::copy(texture_offsets.begin(), texture_offsets.end(), n->src.begin() + 4);
            } else {
                n->src.resize(4);
            }

            for (int s = 0; s < 4; ++s) {
                if (n->bc.dst_sel[s] != SEL_MASK)
                    n->dst[s] = sh->get_gpr_value(false, n->bc.dst_gpr, s, false);
                // NOTE: it doesn't matter here which components of the result we
                // are using, but original n->bc.dst_sel should be taken into
                // account when building the bytecode
            }
            for (unsigned s = 0; s < num_src; ++s) {
                if (n->bc.src_sel[s] <= SEL_W)
                    n->src[s] = sh->get_gpr_value(true, n->bc.src_gpr,
                            n->bc.src_sel[s], false);
            }

            // Scheduler will emit the appropriate instructions to set CF_IDX0/1
            if (n->bc.sampler_index_mode != V_SQ_CF_INDEX_NONE) {
                n->src.push_back(get_cf_index_value(n->bc.sampler_index_mode == V_SQ_CF_INDEX_1));
            }
            if (n->bc.resource_index_mode != V_SQ_CF_INDEX_NONE) {
                n->src.push_back(get_cf_index_value(n->bc.resource_index_mode == V_SQ_CF_INDEX_1));
            }
        }

        if (n->bc.op == FETCH_OP_READ_SCRATCH) {
            n->src.push_back(sh->get_special_value(SV_SCRATCH));
            n->dst.push_back(sh->get_special_value(SV_SCRATCH));
        }
    }

    return 0;
}

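// Restructure the flat CF list into IR: loops become region/repeat nodes,
// jumps become region/depart/if nodes, and burst exports and memory ops
// are unrolled into one CF node per element.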
int bc_parser::prepare_ir() {
    int r;

    for (id_cf_map::iterator I = cf_map.begin(), E = cf_map.end(); I != E; ++I) {
        cf_node *c = *I;

        if (!c)
            continue;

        unsigned flags = c->bc.op_ptr->flags;

        if (flags & CF_ALU) {
            if ((r = prepare_alu_clause(c)))
                return r;
        } else if (flags & CF_FETCH) {
            if ((r = prepare_fetch_clause(c)))
                return r;
        } else if (c->bc.op == CF_OP_CALL_FS) {
            sh->init_call_fs(c);
            c->flags |= NF_SCHEDULE_EARLY | NF_DONT_MOVE;
        } else if (flags & CF_LOOP_START) {
            if ((r = prepare_loop(c)))
                return r;
        } else if (c->bc.op == CF_OP_JUMP) {
            if ((r = prepare_if(c)))
                return r;
        } else if (c->bc.op == CF_OP_LOOP_END) {
            loop_stack.pop();
        } else if (c->bc.op == CF_OP_LOOP_CONTINUE) {
            assert(!loop_stack.empty());
            repeat_node *rep = sh->create_repeat(loop_stack.top());
            if (c->parent->first != c)
                rep->move(c->parent->first, c);
            c->replace_with(rep);
            sh->simplify_dep_rep(rep);
        } else if (c->bc.op == CF_OP_LOOP_BREAK) {
            assert(!loop_stack.empty());
            depart_node *dep = sh->create_depart(loop_stack.top());
            if (c->parent->first != c)
                dep->move(c->parent->first, c);
            c->replace_with(dep);
            sh->simplify_dep_rep(dep);
        } else if (flags & CF_EXP) {

            // unroll burst exports

            assert(c->bc.op == CF_OP_EXPORT || c->bc.op == CF_OP_EXPORT_DONE);

            c->bc.set_op(CF_OP_EXPORT);

            unsigned burst_count = c->bc.burst_count;
            unsigned eop = c->bc.end_of_program;

            c->bc.end_of_program = 0;
            c->bc.burst_count = 0;

            do {
                c->src.resize(4);

                for (int s = 0; s < 4; ++s) {
                    switch (c->bc.sel[s]) {
                    case SEL_0:
                        c->src[s] = sh->get_const_value(0.0f);
                        break;
                    case SEL_1:
                        c->src[s] = sh->get_const_value(1.0f);
                        break;
                    case SEL_MASK:
                        break;
                    default:
                        if (c->bc.sel[s] <= SEL_W)
                            c->src[s] = sh->get_gpr_value(true, c->bc.rw_gpr,
                                    c->bc.sel[s], false);
                        else
                            assert(!"invalid src_sel for export");
                    }
                }

                if (!burst_count--)
                    break;

                cf_node *cf_next = sh->create_cf();
                cf_next->bc = c->bc;
                ++cf_next->bc.rw_gpr;
                ++cf_next->bc.array_base;

                c->insert_after(cf_next);
                c = cf_next;

            } while (1);

            c->bc.end_of_program = eop;
        } else if (flags & CF_MEM) {

            unsigned burst_count = c->bc.burst_count;
            unsigned eop = c->bc.end_of_program;

            c->bc.end_of_program = 0;
            c->bc.burst_count = 0;

            do {

                if (ctx.hw_class == HW_CLASS_R600 && c->bc.op == CF_OP_MEM_SCRATCH &&
                        (c->bc.type == 2 || c->bc.type == 3)) {
                    c->dst.resize(4);
                    for (int s = 0; s < 4; ++s) {
                        if (c->bc.comp_mask & (1 << s))
                            c->dst[s] =
                                sh->get_gpr_value(true, c->bc.rw_gpr, s, false);
                    }
                } else {
                    c->src.resize(4);

                    for (int s = 0; s < 4; ++s) {
                        if (c->bc.comp_mask & (1 << s))
                            c->src[s] =
                                sh->get_gpr_value(true, c->bc.rw_gpr, s, false);
                    }
                }

                if (((flags & CF_RAT) || (!(flags & CF_STRM))) && (c->bc.type & 1)) { // indexed write
                    c->src.resize(8);
                    for (int s = 0; s < 3; ++s) {
                        c->src[4 + s] =
                            sh->get_gpr_value(true, c->bc.index_gpr, s, false);
                    }

                    // FIXME probably we can relax it a bit
                    c->flags |= NF_DONT_HOIST | NF_DONT_MOVE;
                }

                if (flags & CF_EMIT) {
                    // Instruction implicitly depends on prior [EMIT_][CUT]_VERTEX
                    c->src.push_back(sh->get_special_value(SV_GEOMETRY_EMIT));
                    c->dst.push_back(sh->get_special_value(SV_GEOMETRY_EMIT));
                    if (sh->target == TARGET_ES) {
                        // For ES shaders this is an export
                        c->flags |= NF_DONT_KILL;
                    }
                } else if (c->bc.op == CF_OP_MEM_SCRATCH) {
                    c->src.push_back(sh->get_special_value(SV_SCRATCH));
                    c->dst.push_back(sh->get_special_value(SV_SCRATCH));
                }

                if (!burst_count--)
                    break;

                cf_node *cf_next = sh->create_cf();
                cf_next->bc = c->bc;
                ++cf_next->bc.rw_gpr;

                // FIXME is it correct?
                cf_next->bc.array_base += cf_next->bc.elem_size + 1;

                c->insert_after(cf_next);
                c = cf_next;
            } while (1);

            c->bc.end_of_program = eop;

        } else if (flags & CF_EMIT) {
            /* quick peephole */
            cf_node *prev = static_cast<cf_node *>(c->prev);
            if (c->bc.op == CF_OP_CUT_VERTEX &&
                    prev && prev->is_valid() &&
                    prev->bc.op == CF_OP_EMIT_VERTEX &&
                    c->bc.count == prev->bc.count) {
                prev->bc.set_op(CF_OP_EMIT_CUT_VERTEX);
                prev->bc.end_of_program = c->bc.end_of_program;
                c->remove();
            } else {
                c->flags |= NF_DONT_KILL | NF_DONT_HOIST | NF_DONT_MOVE;

                c->src.push_back(sh->get_special_value(SV_GEOMETRY_EMIT));
                c->dst.push_back(sh->get_special_value(SV_GEOMETRY_EMIT));
            }
        } else if (c->bc.op == CF_OP_WAIT_ACK) {
            c->src.push_back(sh->get_special_value(SV_SCRATCH));
            c->dst.push_back(sh->get_special_value(SV_SCRATCH));
        }
    }

    assert(loop_stack.empty());
    return 0;
}

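// LOOP_START: its addr field points one past the matching LOOP_END; wrap
// the whole loop body in a region_node containing a repeat_node.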
int bc_parser::prepare_loop(cf_node* c) {
    assert(c->bc.addr-1 < cf_map.size());

    cf_node *end = cf_map[c->bc.addr - 1];
    assert(end->bc.op == CF_OP_LOOP_END);
    assert(c->parent == end->parent);

    region_node *reg = sh->create_region();
    repeat_node *rep = sh->create_repeat(reg);

    reg->push_back(rep);
    c->insert_before(reg);
    rep->move(c, end->next);

    reg->src_loop = true;

    loop_stack.push(reg);
    return 0;
}

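// JUMP (+ optional ELSE): build a region with two nested depart nodes and
// an if_node conditioned on the exec mask; the departs model leaving the
// region at the jump/else targets.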
int bc_parser::prepare_if(cf_node* c) {
    assert(c->bc.addr-1 < cf_map.size());
    cf_node *c_else = NULL, *end = cf_map[c->bc.addr];

    if (!end)
        return 0; // not quite sure how this happens, malformed input?

    BCP_DUMP(
        sblog << "parsing JUMP @" << c->bc.id;
        sblog << "\n";
    );

    if (end->bc.op == CF_OP_ELSE) {
        BCP_DUMP(
            sblog << "  found ELSE : ";
            dump::dump_op(end);
            sblog << "\n";
        );

        c_else = end;
        end = cf_map[c_else->bc.addr];
    } else {
        BCP_DUMP(
            sblog << "  no else\n";
        );

        c_else = end;
    }

    if (c_else->parent != c->parent)
        c_else = NULL;

    if (end && end->parent != c->parent)
        end = NULL;

    region_node *reg = sh->create_region();

    depart_node *dep2 = sh->create_depart(reg);
    depart_node *dep = sh->create_depart(reg);
    if_node *n_if = sh->create_if();

    c->insert_before(reg);

    if (c_else != end)
        dep->move(c_else, end);
    dep2->move(c, end);

    reg->push_back(dep);
    dep->push_front(n_if);
    n_if->push_back(dep2);

    n_if->cond = sh->get_special_value(SV_EXEC_MASK);

    return 0;
}


} // namespace r600_sb