pan/midgard: Report byte masks for read components
src/panfrost/midgard/mir.c (mesa.git)
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

static midgard_vector_alu_src
mir_get_alu_src(midgard_instruction *ins, unsigned idx)
{
        unsigned b = (idx == 0) ? ins->alu.src1 : ins->alu.src2;
        return vector_alu_from_unsigned(b);
}

unsigned
mir_get_swizzle(midgard_instruction *ins, unsigned idx)
{
        if (ins->type == TAG_ALU_4) {
                if (idx == 2 || ins->compact_branch)
                        return ins->cond_swizzle;

                return (mir_get_alu_src(ins, idx)).swizzle;
        } else if (ins->type == TAG_LOAD_STORE_4) {
                /* Main swizzle of a load is on the destination */
                if (!OP_IS_STORE(ins->load_store.op))
                        idx++;

                switch (idx) {
                case 0:
                        return ins->load_store.swizzle;
                case 1:
                case 2: {
                        uint8_t raw =
                                (idx == 2) ? ins->load_store.arg_2 : ins->load_store.arg_1;

                        /* TODO: Integrate component count with properties */
                        unsigned components = 1;
                        switch (ins->load_store.op) {
                        case midgard_op_ld_int4:
                        case midgard_op_st_int4:
                                /* The address argument lands at idx == 1 for
                                 * both loads (after the increment above) and
                                 * stores; it occupies two components */
                                components = (idx == 1) ? 2 : 1;
                                break;
                        case midgard_op_ld_cubemap_coords:
                                components = 3;
                                break;
                        case midgard_op_ldst_perspective_division_z:
                                components = 3;
                                break;
                        case midgard_op_ldst_perspective_division_w:
                                components = 4;
                                break;
                        default:
                                components = 1;
                                break;
                        }

                        return component_to_swizzle(midgard_ldst_select(raw).component, components);
                }
                default:
                        unreachable("Unknown load/store source");
                }
        } else if (ins->type == TAG_TEXTURE_4) {
                switch (idx) {
                case 0:
                        return ins->texture.in_reg_swizzle;
                case 1:
                        /* Swizzle on bias doesn't make sense */
                        return 0;
                default:
                        unreachable("Unknown texture source");
                }
        } else {
                unreachable("Unknown type");
        }
}

void
mir_set_swizzle(midgard_instruction *ins, unsigned idx, unsigned new)
{
        if (ins->type == TAG_ALU_4) {
                if (idx == 2 || ins->compact_branch) {
                        ins->cond_swizzle = new;
                        return;
                }

                unsigned b = (idx == 0) ? ins->alu.src1 : ins->alu.src2;

                midgard_vector_alu_src s =
                        vector_alu_from_unsigned(b);

                s.swizzle = new;
                unsigned pack = vector_alu_srco_unsigned(s);

                if (idx == 0)
                        ins->alu.src1 = pack;
                else
                        ins->alu.src2 = pack;
        } else if (ins->type == TAG_LOAD_STORE_4) {
                /* Main swizzle of a load is on the destination */
                if (!OP_IS_STORE(ins->load_store.op))
                        idx++;

                switch (idx) {
                case 0:
                        ins->load_store.swizzle = new;
                        break;
                case 1:
                case 2: {
                        uint8_t raw =
                                (idx == 2) ? ins->load_store.arg_2 : ins->load_store.arg_1;

                        midgard_ldst_register_select sel
                                = midgard_ldst_select(raw);
                        sel.component = swizzle_to_component(new);
                        uint8_t packed = midgard_ldst_pack(sel);

                        if (idx == 2)
                                ins->load_store.arg_2 = packed;
                        else
                                ins->load_store.arg_1 = packed;

                        break;
                }
                default:
                        assert(new == 0);
                        break;
                }
        } else if (ins->type == TAG_TEXTURE_4) {
                switch (idx) {
                case 0:
                        ins->texture.in_reg_swizzle = new;
                        break;
                default:
                        assert(new == 0);
                        break;
                }
        } else {
                unreachable("Unknown type");
        }
}
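
/* ALU swizzles pack one 2-bit source component index per destination channel,
 * so the identity swizzle .xyzw encodes as 0b11100100 = 0xE4 and a .yyyy
 * splat as 0b01010101 = 0x55. The two helpers above abstract over where each
 * instruction type actually stores those bits. */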

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;

                mir_set_swizzle(ins, i,
                        pan_compose_swizzle(mir_get_swizzle(ins, i), swizzle));
        }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}

static bool
mir_nontrivial_raw_mod(midgard_vector_alu_src src, bool is_int)
{
        if (is_int)
                return src.mod == midgard_int_shift;
        else
                return src.mod;
}

bool
mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
{
        if (mir_nontrivial_raw_mod(src, is_int)) return true;

        /* size-conversion */
        if (src.half) return true;

        /* swizzle */
        for (unsigned c = 0; c < 4; ++c) {
                if (!(mask & (1 << c))) continue;
                if (((src.swizzle >> (2*c)) & 3) != c) return true;
        }

        return false;
}
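
/* E.g. a .yyyy swizzle is a nontrivial mod under a mask touching any channel
 * other than y, but under mask 0x2 (y only) it reads y into y and is
 * trivial. */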

bool
mir_nontrivial_source2_mod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_mod(src2, is_int, ins->mask);
}

bool
mir_nontrivial_source2_mod_simple(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_raw_mod(src2, is_int) || src2.half;
}

bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);
        unsigned mod = ins->alu.outmod;

        /* Pseudo-outmod */
        if (ins->invert)
                return true;

        /* Type conversion is a sort of outmod */
        if (ins->alu.dest_override != midgard_dest_override_none)
                return true;

        if (is_int)
                return mod != midgard_outmod_int_wrap;
        else
                return mod != midgard_outmod_none;
}

/* Checks if an index will be used as a special register -- basically, if we're
 * used as the input to a non-ALU op */

bool
mir_special_index(compiler_context *ctx, unsigned idx)
{
        mir_foreach_instr_global(ctx, ins) {
                bool is_ldst = ins->type == TAG_LOAD_STORE_4;
                bool is_tex = ins->type == TAG_TEXTURE_4;
                bool is_writeout = ins->compact_branch && ins->writeout;

                if (!(is_ldst || is_tex || is_writeout))
                        continue;

                if (mir_has_arg(ins, idx))
                        return true;
        }

        return false;
}

/* Is a node written before a given instruction? */

bool
mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node)
{
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        mir_foreach_instr_global(ctx, q) {
                if (q == ins)
                        break;

                if (q->dest == node)
                        return true;
        }

        return false;
}

/* Grabs the type size (register mode) of an instruction */

midgard_reg_mode
mir_typesize(midgard_instruction *ins)
{
        if (ins->compact_branch)
                return midgard_reg_mode_32;

        /* TODO: Type sizes for texture */
        if (ins->type == TAG_TEXTURE_4)
                return midgard_reg_mode_32;

        if (ins->type == TAG_LOAD_STORE_4)
                return GET_LDST_SIZE(load_store_opcode_props[ins->load_store.op].props);

        if (ins->type == TAG_ALU_4) {
                midgard_reg_mode mode = ins->alu.reg_mode;

                /* If we have an override, step down by half */
                if (ins->alu.dest_override != midgard_dest_override_none) {
                        assert(mode > midgard_reg_mode_8);
                        mode--;
                }

                return mode;
        }

        unreachable("Invalid instruction type");
}
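
/* For example, an ALU op in midgard_reg_mode_32 with a destination override
 * writes 16-bit results, so it types as midgard_reg_mode_16 here. */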

/* Grabs the size of a source */

static midgard_reg_mode
mir_srcsize(midgard_instruction *ins, unsigned i)
{
        /* TODO: 16-bit textures/ldst */
        if (ins->type == TAG_TEXTURE_4 || ins->type == TAG_LOAD_STORE_4)
                return midgard_reg_mode_32;

        /* TODO: 16-bit branches */
        if (ins->compact_branch)
                return midgard_reg_mode_32;

        if (i >= 2) {
                /* TODO: 16-bit conditions, ffma */
                assert(i == 2);
                return midgard_reg_mode_32;
        }

        /* Default to type of the instruction */

        midgard_reg_mode mode = ins->alu.reg_mode;

        /* If we have a half modifier, step down by half */

        if ((mir_get_alu_src(ins, i)).half) {
                assert(mode > midgard_reg_mode_8);
                mode--;
        }

        return mode;
}

/* Converts per-component mask to a byte mask */

static uint16_t
mir_to_bytemask(midgard_reg_mode mode, unsigned mask)
{
        switch (mode) {
        case midgard_reg_mode_8:
                return mask;

        case midgard_reg_mode_16: {
                unsigned space =
                        ((mask & 0x1) << (0 - 0)) |
                        ((mask & 0x2) << (2 - 1)) |
                        ((mask & 0x4) << (4 - 2)) |
                        ((mask & 0x8) << (6 - 3)) |
                        ((mask & 0x10) << (8 - 4)) |
                        ((mask & 0x20) << (10 - 5)) |
                        ((mask & 0x40) << (12 - 6)) |
                        ((mask & 0x80) << (14 - 7));

                return space | (space << 1);
        }

        case midgard_reg_mode_32: {
                unsigned space =
                        ((mask & 0x1) << (0 - 0)) |
                        ((mask & 0x2) << (4 - 1)) |
                        ((mask & 0x4) << (8 - 2)) |
                        ((mask & 0x8) << (12 - 3));

                return space | (space << 1) | (space << 2) | (space << 3);
        }

        case midgard_reg_mode_64: {
                unsigned A = (mask & 0x1) ? 0xFF : 0x00;
                unsigned B = (mask & 0x2) ? 0xFF : 0x00;
                return A | (B << 8);
        }

        default:
                unreachable("Invalid register mode");
        }
}
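
/* For example, a 16-bit mask of 0x5 (components x and z) covers bytes
 * {0, 1, 4, 5}, giving a bytemask of 0x33; the same mask in 32-bit mode
 * covers bytes {0-3, 8-11}, giving 0x0F0F. */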

/* ...and the inverse, via a helper giving the bytes per component */

static unsigned
mir_bytes_for_mode(midgard_reg_mode mode)
{
        switch (mode) {
        case midgard_reg_mode_8:
                return 1;
        case midgard_reg_mode_16:
                return 2;
        case midgard_reg_mode_32:
                return 4;
        case midgard_reg_mode_64:
                return 8;
        default:
                unreachable("Invalid register mode");
        }
}

uint16_t
mir_from_bytemask(uint16_t bytemask, midgard_reg_mode mode)
{
        unsigned value = 0;
        unsigned count = mir_bytes_for_mode(mode);

        for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
                bool a = (bytemask & (1 << c)) != 0;

                /* The bytemask must be component-aligned: every byte of a
                 * component must agree with the component's first byte */
                for (unsigned q = c; q < c + count; ++q)
                        assert(((bytemask & (1 << q)) != 0) == a);

                value |= (a << d);
        }

        return value;
}
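
/* E.g. a 32-bit bytemask of 0x00FF has components x and y (bytes 0-7) fully
 * enabled, so mir_from_bytemask returns a component mask of 0x3. */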

/* Rounds down a bytemask to fit a given component count. Iterate each
 * component, and check if all bytes in the component are masked on */

uint16_t
mir_round_bytemask_down(uint16_t mask, midgard_reg_mode mode)
{
        unsigned bytes = mir_bytes_for_mode(mode);
        unsigned maxmask = mask_of(bytes);
        unsigned channels = 16 / bytes;

        for (unsigned c = 0; c < channels; ++c) {
                /* Get bytes in component */
                unsigned submask = (mask >> (c * bytes)) & maxmask;

                if (submask != maxmask)
                        mask &= ~(maxmask << (c * bytes));
        }

        return mask;
}
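
/* E.g. rounding down a 32-bit bytemask of 0x0F0F keeps both fully-written
 * components (x and z), while 0x070F rounds down to 0x000F because the bytes
 * of z are only partially on. */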

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
        return mir_to_bytemask(mir_typesize(ins), ins->mask);
}

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 * fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2 (reported as a bytemask, so 0x0FF0 for a
 * 32-bit source)
 */

static uint16_t
mir_bytemask_of_read_components_single(unsigned swizzle, unsigned inmask, midgard_reg_mode mode)
{
        unsigned cmask = 0;

        for (unsigned c = 0; c < 4; ++c) {
                if (!(inmask & (1 << c))) continue;

                unsigned comp = (swizzle >> (2*c)) & 3;
                cmask |= (1 << comp);
        }

        return mir_to_bytemask(mode, cmask);
}
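
/* For r1 in the example above, only lanes x and z of the writemask are live
 * and both read component y, so the component mask is just Y: bytes 4-7,
 * i.e. a 32-bit bytemask of 0x00F0. */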

static unsigned
mir_source_count(midgard_instruction *ins)
{
        if (ins->type == TAG_ALU_4) {
                /* ALU is always binary, except csel */
                return OP_IS_CSEL(ins->alu.op) ? 3 : 2;
        } else if (ins->type == TAG_LOAD_STORE_4) {
                bool load = !OP_IS_STORE(ins->load_store.op);
                return (load ? 2 : 3);
        } else if (ins->type == TAG_TEXTURE_4) {
                /* Coords, bias.. TODO: Offsets? */
                return 2;
        } else {
                unreachable("Invalid instruction type");
        }
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
        uint16_t mask = 0;

        for (unsigned i = 0; i < mir_source_count(ins); ++i) {
                if (ins->src[i] != node) continue;

                /* Branch writeout uses all components */
                if (ins->compact_branch && ins->writeout && (i == 0))
                        return 0xFFFF;

                /* Conditional branches read one 32-bit component = 4 bytes
                 * (TODO: multi branch??) */
                if (ins->compact_branch && !ins->prepacked_branch && ins->branch.conditional && (i == 0))
                        return 0xF;

                /* ALU ops act componentwise so we need to pay attention to
                 * their mask. Texture/ldst does not so we don't clamp source
                 * readmasks based on the writemask */
                unsigned qmask = (ins->type == TAG_ALU_4) ? ins->mask : ~0;

                /* Handle dot products and things */
                if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                        unsigned props = alu_opcode_props[ins->alu.op].props;

                        unsigned channel_override = GET_CHANNEL_COUNT(props);

                        if (channel_override)
                                qmask = mask_of(channel_override);
                }

                unsigned swizzle = mir_get_swizzle(ins, i);
                mask |= mir_bytemask_of_read_components_single(swizzle, qmask, mir_srcsize(ins, i));
        }

        return mask;
}
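
/* Callers such as liveness analysis can union these per-source bytemasks to
 * track which bytes of a node are still live, at sub-register granularity. */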

unsigned
mir_ubo_shift(midgard_load_store_op op)
{
        switch (op) {
        case midgard_op_ld_ubo_char:
                return 0;
        case midgard_op_ld_ubo_char2:
                return 1;
        case midgard_op_ld_ubo_char4:
                return 2;
        case midgard_op_ld_ubo_short4:
                return 3;
        case midgard_op_ld_ubo_int4:
                return 4;
        default:
                unreachable("Invalid op");
        }
}
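
/* The returned shift is log2 of the access size in bytes, so callers can
 * convert byte offsets into op-sized units: e.g. ld_ubo_int4 reads 16 bytes,
 * so a byte offset of 32 shifts down to 32 >> 4 = 2. */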

/* Register allocation occurs after instruction scheduling, which is fine
 * until we start needing to spill registers and therefore insert
 * instructions into an already-scheduled program. We don't have to be
 * terribly efficient about this, since spilling is already slow. So just
 * semantically we need to insert the instruction into a new bundle
 * before/after the bundle of the instruction in question */

static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->alu.op));
                u->unit = UNIT_VMUL;

                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}

static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

void
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Make room by shifting every bundle from `before` onwards up one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
}

void
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* We need to grow the bundles array to add our new bundle */
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Find the bundle that we want to insert after */
        unsigned after = mir_bundle_idx_for_ins(tag, block);

        /* All the bundles after that one, we move ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle = bundles + after;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_add(&new.instructions[0]->link, &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
}
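
/* A typical use is during spilling, where a fill must land immediately before
 * the scheduled use of the spilled value. Illustrative sketch (helper and
 * variable names here are hypothetical, not defined in this file):
 *
 *    midgard_instruction fill = v_mov(spill_index, blank_alu_src, temp);
 *    mir_insert_instruction_before_scheduled(ctx, block, use, fill);
 */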

/* Flip the first two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */

void
mir_flip(midgard_instruction *ins)
{
        assert(ins->type == TAG_ALU_4);

        unsigned temp = ins->src[0];
        ins->src[0] = ins->src[1];
        ins->src[1] = temp;

        temp = ins->alu.src1;
        ins->alu.src1 = ins->alu.src2;
        ins->alu.src2 = temp;
}

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
        if (ctx->temp_count)
                return;

        unsigned max_dest = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < SSA_FIXED_MINIMUM)
                        max_dest = MAX2(max_dest, ins->dest + 1);
        }

        ctx->temp_count = max_dest;
}