a78ca4b91d80b9b7da8337a3f89d64cc972a5bd0
[mesa.git] / src / gallium / drivers / r600 / sb / sb_bc_parser.cpp
1 /*
2 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Vadim Girlin
25 */
26
27 #define BCP_DEBUG 0
28
29 #if BCP_DEBUG
30 #define BCP_DUMP(q) do { q } while (0)
31 #else
32 #define BCP_DUMP(q)
33 #endif
34
35 extern "C" {
36 #include "r600_pipe.h"
37 #include "r600_shader.h"
38 }
39
40 #include <stack>
41
42 #include "sb_bc.h"
43 #include "sb_shader.h"
44 #include "sb_pass.h"
45
46 namespace r600_sb {
47
48 int bc_parser::decode() {
49
50 dw = bc->bytecode;
51 bc_ndw = bc->ndw;
52 max_cf = 0;
53
54 dec = new bc_decoder(ctx, dw, bc_ndw);
55
56 shader_target t = TARGET_UNKNOWN;
57
58 if (pshader) {
59 switch (bc->type) {
60 case TGSI_PROCESSOR_FRAGMENT: t = TARGET_PS; break;
61 case TGSI_PROCESSOR_VERTEX: t = TARGET_VS; break;
62 case TGSI_PROCESSOR_COMPUTE: t = TARGET_COMPUTE; break;
63 default: assert(!"unknown shader target"); return -1; break;
64 }
65 } else {
66 if (bc->type == TGSI_PROCESSOR_COMPUTE)
67 t = TARGET_COMPUTE;
68 else
69 t = TARGET_FETCH;
70 }
71
72 sh = new shader(ctx, t, bc->debug_id);
73 int r = decode_shader();
74
75 delete dec;
76
77 sh->ngpr = bc->ngpr;
78 sh->nstack = bc->nstack;
79
80 return r;
81 }
82
83 int bc_parser::decode_shader() {
84 int r = 0;
85 unsigned i = 0;
86 bool eop = false;
87
88 sh->init();
89
90 do {
91 eop = false;
92 if ((r = decode_cf(i, eop)))
93 return r;
94
95 } while (!eop || (i >> 1) <= max_cf);
96
97 return 0;
98 }
99
100 int bc_parser::prepare() {
101 int r = 0;
102 if ((r = parse_decls()))
103 return r;
104 if ((r = prepare_ir()))
105 return r;
106 return 0;
107 }
108
109 int bc_parser::parse_decls() {
110
111 if (!pshader) {
112 sh->add_gpr_array(0, pshader->bc.ngpr, 0x0F);
113 return 0;
114 }
115
116 if (pshader->indirect_files & ~(1 << TGSI_FILE_CONSTANT)) {
117
118 assert(pshader->num_arrays);
119
120 if (pshader->num_arrays) {
121 for (unsigned i = 0; i < pshader->num_arrays; ++i) {
122 r600_shader_array &a = pshader->arrays[i];
123 sh->add_gpr_array(a.gpr_start, a.gpr_count, a.comp_mask);
124 }
125 } else {
126 sh->add_gpr_array(0, pshader->bc.ngpr, 0x0F);
127 }
128 }
129
130 if (sh->target == TARGET_VS)
131 sh->add_input(0, 1, 0x0F);
132
133 bool ps_interp = ctx.hw_class >= HW_CLASS_EVERGREEN
134 && sh->target == TARGET_PS;
135
136 unsigned linear = 0, persp = 0, centroid = 1;
137
138 for (unsigned i = 0; i < pshader->ninput; ++i) {
139 r600_shader_io & in = pshader->input[i];
140 bool preloaded = sh->target == TARGET_PS && !(ps_interp && in.spi_sid);
141 sh->add_input(in.gpr, preloaded, /*in.write_mask*/ 0x0F);
142 if (ps_interp && in.spi_sid) {
143 if (in.interpolate == TGSI_INTERPOLATE_LINEAR ||
144 in.interpolate == TGSI_INTERPOLATE_COLOR)
145 linear = 1;
146 else if (in.interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
147 persp = 1;
148 if (in.centroid)
149 centroid = 2;
150 }
151 }
152
153 if (ps_interp) {
154 unsigned mask = (1 << (2 * (linear + persp) * centroid)) - 1;
155 unsigned gpr = 0;
156
157 while (mask) {
158 sh->add_input(gpr, true, mask & 0x0F);
159 ++gpr;
160 mask >>= 4;
161 }
162 }
163
164 return 0;
165 }
166
167 int bc_parser::decode_cf(unsigned &i, bool &eop) {
168
169 int r;
170
171 cf_node *cf = sh->create_cf();
172 sh->root->push_back(cf);
173
174 unsigned id = i >> 1;
175
176 cf->bc.id = id;
177
178 if (cf_map.size() < id + 1)
179 cf_map.resize(id + 1);
180
181 cf_map[id] = cf;
182
183 if ((r = dec->decode_cf(i, cf->bc)))
184 return r;
185
186 cf_op_flags flags = (cf_op_flags)cf->bc.op_ptr->flags;
187
188 if (flags & CF_ALU) {
189 if ((r = decode_alu_clause(cf)))
190 return r;
191 } else if (flags & CF_FETCH) {
192 if ((r = decode_fetch_clause(cf)))
193 return r;;
194 } else if (flags & CF_EXP) {
195 assert(!cf->bc.rw_rel);
196 } else if (flags & (CF_STRM | CF_RAT)) {
197 assert(!cf->bc.rw_rel);
198 } else if (flags & CF_BRANCH) {
199 if (cf->bc.addr > max_cf)
200 max_cf = cf->bc.addr;
201 }
202
203 eop = cf->bc.end_of_program || cf->bc.op == CF_OP_CF_END ||
204 cf->bc.op == CF_OP_RET;
205 return 0;
206 }
207
208 int bc_parser::decode_alu_clause(cf_node* cf) {
209 unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1, gcnt;
210
211 cf->subtype = NST_ALU_CLAUSE;
212
213 cgroup = 0;
214 memset(slots[0], 0, 5*sizeof(slots[0][0]));
215
216 unsigned ng = 0;
217
218 do {
219 decode_alu_group(cf, i, gcnt);
220 assert(gcnt <= cnt);
221 cnt -= gcnt;
222 ng++;
223 } while (cnt);
224
225 return 0;
226 }
227
// Decode one ALU instruction group starting at dword offset i.
// On return, i points past the group (including trailing literal
// constants) and gcnt holds the number of 64-bit slots consumed, so the
// caller can subtract it from the clause's remaining count.
int bc_parser::decode_alu_group(cf_node* cf, unsigned &i, unsigned &gcnt) {
	int r;
	alu_node *n;
	alu_group_node *g = sh->create_alu_group();

	// flip group parity; slots[cgroup] tracks the slot occupancy of
	// the group currently being decoded
	cgroup = !cgroup;
	memset(slots[cgroup], 0, 5*sizeof(slots[0][0]));
	gcnt = 0;

	unsigned literal_mask = 0;

	// decode instructions until the one marked 'last' (a group has at
	// most 5 slots)
	do {
		n = sh->create_alu();
		g->push_back(n);

		if ((r = dec->decode_alu(i, n->bc)))
			return r;

		if (!sh->assign_slot(n, slots[cgroup])) {
			assert(!"alu slot assignment failed");
			return -1;
		}

		gcnt++;

	} while (gcnt <= 5 && !n->bc.last);

	assert(n->bc.last);

	// resolve literal operands: literal dwords follow the group in the
	// bytecode, indexed by the source operand's channel
	for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
		n = static_cast<alu_node*>(*I);

		for (int k = 0; k < n->bc.op_ptr->src_count; ++k) {
			bc_alu_src &src = n->bc.src[k];
			if (src.sel == ALU_SRC_LITERAL) {
				literal_mask |= (1 << src.chan);
				src.value.u = dw[i + src.chan];
			}
		}
	}

	// copy every literal dword up to (and including) the highest used
	// channel into the group node
	unsigned literal_ndw = 0;
	while (literal_mask) {
		g->literals.push_back(dw[i + literal_ndw]);
		literal_ndw += 1;
		literal_mask >>= 1;
	}

	// literals occupy whole 64-bit slots, so round up to an even
	// number of dwords
	literal_ndw = (literal_ndw + 1) & ~1u;

	i += literal_ndw;
	gcnt += literal_ndw >> 1;

	cf->push_back(g);
	return 0;
}
284
285 int bc_parser::prepare_alu_clause(cf_node* cf) {
286
287 // loop over alu groups
288 for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {
289 assert(I->subtype == NST_ALU_GROUP);
290 alu_group_node *g = static_cast<alu_group_node*>(*I);
291 prepare_alu_group(cf, g);
292 }
293
294 return 0;
295 }
296
// Lift one decoded ALU group into IR form: assign slots, build dst/src
// value operands (gprs, kcache, literals, inline constants, PV/PS
// results of the previous group), then gather multislot instructions
// into an alu_packed_node.
int bc_parser::prepare_alu_group(cf_node* cf, alu_group_node *g) {

	alu_node *n;

	// flip group parity: slots[cgroup] is this group, slots[!cgroup]
	// the previous one (needed to resolve PV/PS operands below)
	cgroup = !cgroup;
	memset(slots[cgroup], 0, 5*sizeof(slots[0][0]));

	for (node_iterator I = g->begin(), E = g->end();
			I != E; ++I) {
		n = static_cast<alu_node*>(*I);

		if (!sh->assign_slot(n, slots[cgroup])) {
			assert(!"alu slot assignment failed");
			return -1;
		}

		unsigned src_count = n->bc.op_ptr->src_count;

		if (ctx.alu_slots(n->bc.op) & AF_4SLOT)
			n->flags |= NF_ALU_4SLOT;

		n->src.resize(src_count);

		unsigned flags = n->bc.op_ptr->flags;

		if (flags & AF_PRED) {
			// predicate-setting op: dst[1] = alu predicate,
			// dst[2] = exec mask (each only if the bc updates it)
			n->dst.resize(3);
			if (n->bc.update_pred)
				n->dst[1] = sh->get_special_value(SV_ALU_PRED);
			if (n->bc.update_exec_mask)
				n->dst[2] = sh->get_special_value(SV_EXEC_MASK);

			n->flags |= NF_DONT_HOIST;

		} else if (flags & AF_KILL) {

			// kill op: dst[1] = valid mask
			n->dst.resize(2);
			n->dst[1] = sh->get_special_value(SV_VALID_MASK);
			sh->set_uses_kill();

			n->flags |= NF_DONT_HOIST | NF_DONT_MOVE |
					NF_DONT_KILL | NF_SCHEDULE_EARLY;

		} else {
			n->dst.resize(1);
		}

		if (flags & AF_MOVA) {

			// MOVA-style ops write the address register
			n->dst[0] = sh->get_special_value(SV_AR_INDEX);

			n->flags |= NF_DONT_HOIST;

		} else if (n->bc.op_ptr->src_count == 3 || n->bc.write_mask) {
			// 3-source ops always write a gpr; other ops only when
			// write_mask is set
			assert(!n->bc.dst_rel || n->bc.index_mode == INDEX_AR_X);

			value *v = sh->get_gpr_value(false, n->bc.dst_gpr, n->bc.dst_chan,
					n->bc.dst_rel);

			n->dst[0] = v;
		}

		if (n->bc.pred_sel) {
			sh->has_alu_predication = true;
			n->pred = sh->get_special_value(SV_ALU_PRED);
		}

		for (unsigned s = 0; s < src_count; ++s) {
			bc_alu_src &src = n->bc.src[s];

			if (src.sel == ALU_SRC_LITERAL) {
				n->src[s] = sh->get_const_value(src.value);
			} else if (src.sel == ALU_SRC_PS || src.sel == ALU_SRC_PV) {
				// PV/PS refer to the previous group's result in the
				// corresponding slot (PS = trans slot, PV = src chan)
				unsigned pgroup = !cgroup, prev_slot = src.sel == ALU_SRC_PS ?
						SLOT_TRANS : src.chan;
				alu_node *prev_alu = slots[pgroup][prev_slot];

				assert(prev_alu);

				// the producer may have no dst (write_mask 0); create
				// a temp value so the dependency is representable
				if (!prev_alu->dst[0]) {
					value * t = sh->create_temp_value();
					prev_alu->dst[0] = t;
				}

				value *d = prev_alu->dst[0];

				// relative dst can't be used directly as a source -
				// reread the gpr value instead
				if (d->is_rel()) {
					d = sh->get_gpr_value(true, prev_alu->bc.dst_gpr,
							prev_alu->bc.dst_chan,
							prev_alu->bc.dst_rel);
				}

				n->src[s] = d;
			} else if (ctx.is_kcache_sel(src.sel)) {
				// constant-cache operand: sel encodes the kcache set
				// (bits 5 and 8) and the offset into the bound lines
				unsigned sel = src.sel, kc_addr;
				unsigned kc_set = ((sel >> 7) & 2) + ((sel >> 5) & 1);

				bc_kcache &kc = cf->bc.kc[kc_set];
				kc_addr = (kc.addr << 4) + (sel & 0x1F);
				n->src[s] = sh->get_kcache_value(kc.bank, kc_addr, src.chan);
			} else if (src.sel < MAX_GPR) {
				value *v = sh->get_gpr_value(true, src.sel, src.chan, src.rel);

				n->src[s] = v;

			} else if (src.sel >= ALU_SRC_PARAM_OFFSET) {
				// using slot for value channel because in fact the slot
				// determines the channel that is loaded by INTERP_LOAD_P0
				// (and maybe some others).
				// otherwise GVN will consider INTERP_LOAD_P0s with the same
				// param index as equal instructions and leave only one of them
				n->src[s] = sh->get_special_ro_value(sel_chan(src.sel,
						n->bc.slot));
			} else {
				// inline hardware constants; anything unrecognized is
				// kept as an opaque read-only special value
				switch (src.sel) {
				case ALU_SRC_0:
					n->src[s] = sh->get_const_value(0);
					break;
				case ALU_SRC_0_5:
					n->src[s] = sh->get_const_value(0.5f);
					break;
				case ALU_SRC_1:
					n->src[s] = sh->get_const_value(1.0f);
					break;
				case ALU_SRC_1_INT:
					n->src[s] = sh->get_const_value(1);
					break;
				case ALU_SRC_M_1_INT:
					n->src[s] = sh->get_const_value(-1);
					break;
				default:
					n->src[s] = sh->get_special_ro_value(src.sel);
					break;
				}
			}
		}
	}

	// pack multislot instructions into alu_packed_node

	alu_packed_node *p = NULL;
	for (node_iterator N, I = g->begin(), E = g->end(); I != E; I = N) {
		// save the successor first: a->remove() unlinks I
		N = I + 1;
		alu_node *a = static_cast<alu_node*>(*I);
		unsigned sflags = a->bc.slot_flags;

		if (sflags == AF_4V || (ctx.is_cayman() && sflags == AF_S)) {
			if (!p)
				p = sh->create_alu_packed();

			a->remove();
			p->push_back(a);
		}
	}

	if (p) {
		g->push_front(p);

		if (p->count() == 3 && ctx.is_cayman()) {
			// cayman's scalar instruction that can use 3 or 4 slots

			// FIXME for simplicity we'll always add 4th slot,
			// but probably we might want to always remove 4th slot and make
			// sure that regalloc won't choose 'w' component for dst

			alu_node *f = static_cast<alu_node*>(p->first);
			alu_node *a = sh->create_alu();
			a->src = f->src;
			a->dst.resize(f->dst.size());
			a->bc = f->bc;
			a->bc.slot = SLOT_W;
			p->push_back(a);
		}
	}

	return 0;
}
474
475 int bc_parser::decode_fetch_clause(cf_node* cf) {
476 int r;
477 unsigned i = cf->bc.addr << 1, cnt = cf->bc.count + 1;
478
479 cf->subtype = NST_TEX_CLAUSE;
480
481 while (cnt--) {
482 fetch_node *n = sh->create_fetch();
483 cf->push_back(n);
484 if ((r = dec->decode_fetch(i, n->bc)))
485 return r;
486
487 }
488 return 0;
489 }
490
// Lift decoded fetch instructions into IR form: create dst/src gpr
// values and thread gradient values from SET_GRADIENTS_V/H to the
// following gradient-using instructions in the same clause.
int bc_parser::prepare_fetch_clause(cf_node *cf) {

	// most recently set vertical/horizontal gradients in this clause
	vvec grad_v, grad_h;

	for (node_iterator I = cf->begin(), E = cf->end(); I != E; ++I) {

		fetch_node *n = static_cast<fetch_node*>(*I);
		assert(n->is_valid());

		unsigned flags = n->bc.op_ptr->flags;

		unsigned vtx = flags & FF_VTX;
		unsigned num_src = vtx ? ctx.vtx_src_num : 4;

		n->dst.resize(4);

		if (flags & (FF_SETGRAD | FF_USEGRAD | FF_GETGRAD)) {
			sh->uses_gradients = true;
		}

		if (flags & FF_SETGRAD) {

			// SET_GRADIENTS_* produces no direct result here; record
			// its source values for later FF_USEGRAD instructions
			vvec *grad = NULL;

			switch (n->bc.op) {
			case FETCH_OP_SET_GRADIENTS_V:
				grad = &grad_v;
				break;
			case FETCH_OP_SET_GRADIENTS_H:
				grad = &grad_h;
				break;
			default:
				assert(!"unexpected SET_GRAD instruction");
				return -1;
			}

			if (grad->empty())
				grad->resize(4);

			// source swizzle: gpr component, constant 0 or constant 1
			for(unsigned s = 0; s < 4; ++s) {
				unsigned sw = n->bc.src_sel[s];
				if (sw <= SEL_W)
					(*grad)[s] = sh->get_gpr_value(true, n->bc.src_gpr,
							sw, false);
				else if (sw == SEL_0)
					(*grad)[s] = sh->get_const_value(0.0f);
				else if (sw == SEL_1)
					(*grad)[s] = sh->get_const_value(1.0f);
			}
		} else {

			if (flags & FF_USEGRAD) {
				// srcs 4..7 carry the vertical gradient,
				// srcs 8..11 the horizontal one
				n->src.resize(12);
				std::copy(grad_v.begin(), grad_v.end(), n->src.begin() + 4);
				std::copy(grad_h.begin(), grad_h.end(), n->src.begin() + 8);
			} else {
				n->src.resize(4);
			}

			for(int s = 0; s < 4; ++s) {
				if (n->bc.dst_sel[s] != SEL_MASK)
					n->dst[s] = sh->get_gpr_value(false, n->bc.dst_gpr, s, false);
				// NOTE: it doesn't matter here which components of the result we
				// are using, but original n->bc.dst_sel should be taken into
				// account when building the bytecode
			}
			for(unsigned s = 0; s < num_src; ++s) {
				if (n->bc.src_sel[s] <= SEL_W)
					n->src[s] = sh->get_gpr_value(true, n->bc.src_gpr,
							n->bc.src_sel[s], false);
			}

		}
	}

	return 0;
}
568
// Walk all decoded CF instructions (in id order via cf_map) and lift
// them into structured IR: prepare ALU/fetch clauses, convert loops and
// conditional jumps into region/repeat/depart/if nodes, and unroll
// burst exports and stream/RAT writes into individual CF nodes.
int bc_parser::prepare_ir() {

	for(id_cf_map::iterator I = cf_map.begin(), E = cf_map.end(); I != E; ++I) {
		cf_node *c = *I;

		// cf_map may have holes (ids never decoded)
		if (!c)
			continue;

		unsigned flags = c->bc.op_ptr->flags;

		if (flags & CF_ALU) {
			prepare_alu_clause(c);
		} else if (flags & CF_FETCH) {
			prepare_fetch_clause(c);
		} else if (c->bc.op == CF_OP_CALL_FS) {
			sh->init_call_fs(c);
			c->flags |= NF_SCHEDULE_EARLY | NF_DONT_MOVE;
		} else if (flags & CF_LOOP_START) {
			// pushes the new loop region onto loop_stack
			prepare_loop(c);
		} else if (c->bc.op == CF_OP_JUMP) {
			prepare_if(c);
		} else if (c->bc.op == CF_OP_LOOP_END) {
			// matching region was pushed by prepare_loop
			loop_stack.pop();
		} else if (c->bc.op == CF_OP_LOOP_CONTINUE) {
			// replace CONTINUE with a repeat node targeting the
			// innermost loop region; preceding siblings move inside it
			assert(!loop_stack.empty());
			repeat_node *rep = sh->create_repeat(loop_stack.top());
			if (c->parent->first != c)
				rep->move(c->parent->first, c);
			c->replace_with(rep);
			sh->simplify_dep_rep(rep);
		} else if (c->bc.op == CF_OP_LOOP_BREAK) {
			// same as CONTINUE above, but with a depart (loop exit)
			assert(!loop_stack.empty());
			depart_node *dep = sh->create_depart(loop_stack.top());
			if (c->parent->first != c)
				dep->move(c->parent->first, c);
			c->replace_with(dep);
			sh->simplify_dep_rep(dep);
		} else if (flags & CF_EXP) {

			// unroll burst exports

			assert(c->bc.op == CF_OP_EXPORT || c->bc.op == CF_OP_EXPORT_DONE);

			c->bc.set_op(CF_OP_EXPORT);

			unsigned burst_count = c->bc.burst_count;
			unsigned eop = c->bc.end_of_program;

			// the unrolled copies are single exports; end_of_program is
			// restored on the last one after the loop
			c->bc.end_of_program = 0;
			c->bc.burst_count = 0;

			do {
				c->src.resize(4);

				// build src values from the export swizzle
				for(int s = 0; s < 4; ++s) {
					switch (c->bc.sel[s]) {
					case SEL_0:
						c->src[s] = sh->get_const_value(0.0f);
						break;
					case SEL_1:
						c->src[s] = sh->get_const_value(1.0f);
						break;
					case SEL_MASK:
						break;
					default:
						if (c->bc.sel[s] <= SEL_W)
							c->src[s] = sh->get_gpr_value(true, c->bc.rw_gpr,
									c->bc.sel[s], false);
						else
							assert(!"invalid src_sel for export");
					}
				}

				if (!burst_count--)
					break;

				// clone the export for the next burst element, bumping
				// source gpr and export target
				cf_node *cf_next = sh->create_cf();
				cf_next->bc = c->bc;
				++cf_next->bc.rw_gpr;
				++cf_next->bc.array_base;

				c->insert_after(cf_next);
				c = cf_next;

			} while (1);

			c->bc.end_of_program = eop;
		} else if (flags & (CF_STRM | CF_RAT)) {

			// unroll burst stream/RAT writes, same pattern as exports
			unsigned burst_count = c->bc.burst_count;
			unsigned eop = c->bc.end_of_program;

			c->bc.end_of_program = 0;
			c->bc.burst_count = 0;

			do {

				c->src.resize(4);

				for(int s = 0; s < 4; ++s) {
					if (c->bc.comp_mask & (1 << s))
						c->src[s] =
								sh->get_gpr_value(true, c->bc.rw_gpr, s, false);
				}

				if ((flags & CF_RAT) && (c->bc.type & 1)) { // indexed write
					// srcs 4..6 carry the index from index_gpr
					c->src.resize(8);
					for(int s = 0; s < 3; ++s) {
						c->src[4 + s] =
								sh->get_gpr_value(true, c->bc.index_gpr, s, false);
					}

					// FIXME probably we can relax it a bit
					c->flags |= NF_DONT_HOIST | NF_DONT_MOVE;
				}

				if (!burst_count--)
					break;

				cf_node *cf_next = sh->create_cf();
				cf_next->bc = c->bc;
				++cf_next->bc.rw_gpr;

				// FIXME is it correct?
				cf_next->bc.array_base += cf_next->bc.elem_size + 1;

				c->insert_after(cf_next);
				c = cf_next;
			} while (1);

			c->bc.end_of_program = eop;

		}
	}

	// every LOOP_START must have been matched by a LOOP_END
	assert(loop_stack.empty());
	return 0;
}
707
708 int bc_parser::prepare_loop(cf_node* c) {
709
710 cf_node *end = cf_map[c->bc.addr - 1];
711 assert(end->bc.op == CF_OP_LOOP_END);
712 assert(c->parent == end->parent);
713
714 region_node *reg = sh->create_region();
715 repeat_node *rep = sh->create_repeat(reg);
716
717 reg->push_back(rep);
718 c->insert_before(reg);
719 rep->move(c, end->next);
720
721 loop_stack.push(reg);
722 return 0;
723 }
724
// Convert a JUMP (with optional ELSE) into structured IR:
//
//   region
//     depart(dep)            <- else-branch (departs region when done)
//       if (exec mask)
//         depart(dep2)       <- then-branch
//
// c->bc.addr points at the join target: either the ELSE cf or the
// end of the conditional.
int bc_parser::prepare_if(cf_node* c) {
	cf_node *c_else = NULL, *end = cf_map[c->bc.addr];

	BCP_DUMP(
		sblog << "parsing JUMP @" << c->bc.id;
		sblog << "\n";
	);

	if (end->bc.op == CF_OP_ELSE) {
		BCP_DUMP(
			sblog << "  found ELSE : ";
			dump::dump_op(end);
			sblog << "\n";
		);

		c_else = end;
		// ELSE's addr points past the else-branch
		end = cf_map[c_else->bc.addr];
	} else {
		BCP_DUMP(
			sblog << "  no else\n";
		);

		c_else = end;
	}

	// targets outside our structural parent can't be used as move
	// boundaries - drop them (move() treats NULL as "to the end")
	if (c_else->parent != c->parent)
		c_else = NULL;

	if (end->parent != c->parent)
		end = NULL;

	region_node *reg = sh->create_region();

	// dep2 wraps the then-branch, dep the else-branch
	depart_node *dep2 = sh->create_depart(reg);
	depart_node *dep = sh->create_depart(reg);
	if_node *n_if = sh->create_if();

	c->insert_before(reg);

	if (c_else != end)
		dep->move(c_else, end);
	dep2->move(c, end);

	reg->push_back(dep);
	dep->push_front(n_if);
	n_if->push_back(dep2);

	n_if->cond = sh->get_special_value(SV_EXEC_MASK);

	return 0;
}
776
777
778 } // namespace r600_sb