mesa.git: src/gallium/drivers/vc4/vc4_qpu_emit.c (commit 26520fec22f5d3e6ae7f6733bb6572462edd7fcf)
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"

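/* Prints each QPU instruction as raw hex alongside its disassembly; used
 * when vc4_debug has VC4_DEBUG_QPU set.
 */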
static void
vc4_dump_program(struct vc4_compile *c)
{
        fprintf(stderr, "%s:\n", qir_get_stage_name(c->stage));

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}

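/* Generated instructions are collected on this list first; serialize_insts()
 * then copies them out while inserting the NOPs and signal changes the
 * hardware requires.
 */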
struct queued_qpu_inst {
        struct simple_node link;
        uint64_t inst;
};

static void
queue(struct vc4_compile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = calloc(1, sizeof(*q));
        q->inst = inst;
        insert_at_tail(&c->qpu_inst_list, &q->link);
}

static uint64_t *
last_inst(struct vc4_compile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)last_elem(&c->qpu_inst_list);
        return &q->inst;
}

static void
set_last_cond_add(struct vc4_compile *c, uint32_t cond)
{
        *last_inst(c) = qpu_set_cond_add(*last_inst(c), cond);
}

/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file,
 * even though instructions have only one field for the register file source
 * address.
 *
 * In that case, we need to move one of them to a temporary that can be used
 * in the instruction instead.
 *
 * Note that this check is conservative: it also fires when the two operands
 * live in different register files, which merely costs an extra MOV.
 */
static void
fixup_raddr_conflict(struct vc4_compile *c,
                     struct qpu_reg src0, struct qpu_reg *src1)
{
        if ((src0.mux == QPU_MUX_A || src0.mux == QPU_MUX_B) &&
            (src1->mux == QPU_MUX_A || src1->mux == QPU_MUX_B) &&
            src0.addr != src1->addr) {
                queue(c, qpu_a_MOV(qpu_r3(), *src1));
                *src1 = qpu_r3();
        }
}

static void
serialize_one_inst(struct vc4_compile *c, uint64_t inst)
{
        if (c->qpu_inst_count >= c->qpu_inst_size) {
                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
                c->qpu_insts = realloc(c->qpu_insts,
                                       c->qpu_inst_size * sizeof(uint64_t));
        }
        c->qpu_insts[c->qpu_inst_count++] = inst;
}

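/**
 * Copies the queued instructions into c->qpu_insts, padding with NOPs and
 * setting signal bits where the hardware's scheduling rules demand it.
 */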
static void
serialize_insts(struct vc4_compile *c)
{
        int last_sfu_write = -10;
        bool scoreboard_wait_emitted = false;

        while (!is_empty_list(&c->qpu_inst_list)) {
                struct queued_qpu_inst *q =
                        (struct queued_qpu_inst *)first_elem(&c->qpu_inst_list);
                uint32_t last_waddr_a = QPU_W_NOP, last_waddr_b = QPU_W_NOP;
                uint32_t raddr_a = QPU_GET_FIELD(q->inst, QPU_RADDR_A);
                uint32_t raddr_b = QPU_GET_FIELD(q->inst, QPU_RADDR_B);

                if (c->qpu_inst_count > 0) {
                        uint64_t last_inst = c->qpu_insts[c->qpu_inst_count -
                                                          1];
                        uint32_t last_waddr_add = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_ADD);
                        uint32_t last_waddr_mul = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_MUL);

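                        /* The WS bit swaps which regfile each ALU writes:
                         * normally the add op writes regfile A and the mul
                         * op writes regfile B, and WS reverses that.
                         */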
                        if (last_inst & QPU_WS) {
                                last_waddr_a = last_waddr_mul;
                                last_waddr_b = last_waddr_add;
                        } else {
                                last_waddr_a = last_waddr_add;
                                last_waddr_b = last_waddr_mul;
                        }
                }

                uint32_t src_muxes[] = {
                        QPU_GET_FIELD(q->inst, QPU_ADD_A),
                        QPU_GET_FIELD(q->inst, QPU_ADD_B),
                        QPU_GET_FIELD(q->inst, QPU_MUL_A),
                        QPU_GET_FIELD(q->inst, QPU_MUL_B),
                };

                /* "An instruction must not read from a location in physical
                 * regfile A or B that was written to by the previous
                 * instruction."
                 */
                bool needs_raddr_vs_waddr_nop = false;
                bool reads_r4 = false;
                for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                        if ((raddr_a < 32 &&
                             src_muxes[i] == QPU_MUX_A &&
                             last_waddr_a == raddr_a) ||
                            (raddr_b < 32 &&
                             src_muxes[i] == QPU_MUX_B &&
                             last_waddr_b == raddr_b)) {
                                needs_raddr_vs_waddr_nop = true;
                        }
                        if (src_muxes[i] == QPU_MUX_R4)
                                reads_r4 = true;
                }

                if (needs_raddr_vs_waddr_nop) {
                        serialize_one_inst(c, qpu_NOP());
                }

                /* "After an SFU lookup instruction, accumulator r4 must not
                 * be read in the following two instructions. Any other
                 * instruction that results in r4 being written (that is, TMU
                 * read, TLB read, SFU lookup) cannot occur in the two
                 * instructions following an SFU lookup."
                 */
                if (reads_r4) {
                        while (c->qpu_inst_count - last_sfu_write < 3) {
                                serialize_one_inst(c, qpu_NOP());
                        }
                }

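                /* Remember the index this instruction will occupy if it
                 * starts an SFU lookup, so later r4 reads can pad out the
                 * two-instruction gap above.
                 */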
                uint32_t waddr_a = QPU_GET_FIELD(q->inst, QPU_WADDR_ADD);
                uint32_t waddr_m = QPU_GET_FIELD(q->inst, QPU_WADDR_MUL);
                if ((waddr_a >= QPU_W_SFU_RECIP && waddr_a <= QPU_W_SFU_LOG) ||
                    (waddr_m >= QPU_W_SFU_RECIP && waddr_m <= QPU_W_SFU_LOG)) {
                        last_sfu_write = c->qpu_inst_count;
                }

                /* "A scoreboard wait must not occur in the first two
                 * instructions of a fragment shader. This is either the
                 * explicit Wait for Scoreboard signal or an implicit wait
                 * with the first tile-buffer read or write instruction."
                 */
                if (!scoreboard_wait_emitted &&
                    (waddr_a == QPU_W_TLB_Z || waddr_m == QPU_W_TLB_Z ||
                     waddr_a == QPU_W_TLB_COLOR_MS ||
                     waddr_m == QPU_W_TLB_COLOR_MS ||
                     waddr_a == QPU_W_TLB_COLOR_ALL ||
                     waddr_m == QPU_W_TLB_COLOR_ALL ||
                     QPU_GET_FIELD(q->inst, QPU_SIG) == QPU_SIG_COLOR_LOAD)) {
                        while (c->qpu_inst_count < 3 ||
                               QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                                             QPU_SIG) != QPU_SIG_NONE) {
                                serialize_one_inst(c, qpu_NOP());
                        }
                        c->qpu_insts[c->qpu_inst_count - 1] =
                                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                            QPU_SIG_WAIT_FOR_SCOREBOARD);
                        scoreboard_wait_emitted = true;
                }

                serialize_one_inst(c, q->inst);

                remove_from_list(&q->link);
                free(q);
        }
}

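/**
 * Translates c->instructions (QIR) into QPU instructions, using a trivial
 * first-fit register allocator driven by remaining-use counts.
 */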
void
vc4_generate_code(struct vc4_compile *c)
{
        struct qpu_reg allocate_to_qpu_reg[4 + 32 + 32];
        bool reg_in_use[ARRAY_SIZE(allocate_to_qpu_reg)];
        int *reg_allocated = calloc(c->num_temps, sizeof(*reg_allocated));
        int *reg_uses_remaining =
                calloc(c->num_temps, sizeof(*reg_uses_remaining));
        bool discard = false;

        for (int i = 0; i < ARRAY_SIZE(reg_in_use); i++)
                reg_in_use[i] = false;
        for (int i = 0; i < c->num_temps; i++)
                reg_allocated[i] = -1;

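        /* Allocation candidates: entries 0-3 are the accumulators r0, r1, r2
         * and r4 (r3 is kept free as a temporary for fixup_raddr_conflict()
         * and QOP_PACK_COLORS), entries 4-35 are regfile A, and entries
         * 36-67 are regfile B.
         */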
        uint32_t next_reg = 0;
        for (int i = 0; i < 4; i++)
                allocate_to_qpu_reg[next_reg++] = qpu_rn(i == 3 ? 4 : i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[next_reg++] = qpu_ra(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[next_reg++] = qpu_rb(i);
        assert(next_reg == ARRAY_SIZE(allocate_to_qpu_reg));

        make_empty_list(&c->qpu_inst_list);

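        /* Count the uses of each temp so its register can be freed at the
         * last read, and pre-reserve the Z/W payload register if FRAG_Z will
         * need it.
         */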
        struct simple_node *node;
        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

                if (qinst->dst.file == QFILE_TEMP)
                        reg_uses_remaining[qinst->dst.index]++;
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        if (qinst->src[i].file == QFILE_TEMP)
                                reg_uses_remaining[qinst->src[i].index]++;
                }
                /* rb[FRAG_PAYLOAD_ZW] sits after the 4 accumulators and 32
                 * regfile A entries in allocate_to_qpu_reg[].
                 */
                if (qinst->op == QOP_FRAG_Z)
                        reg_in_use[4 + 32 + QPU_R_FRAG_PAYLOAD_ZW] = true;
        }

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                         (0x00001a00 +
                                          0x00100000 * c->num_inputs)));
                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }

        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

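                /* Most QIR ALU ops map 1:1 onto a QPU add- or mul-unit op;
                 * this table drives the default case below.
                 */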
                static const struct {
                        uint32_t op;
                        bool is_mul;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name, false}
#define M(name) [QOP_##name] = {QPU_M_##name, true}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),
                        A(ADD),
                        A(SUB),
                        A(SHL),
                        A(SHR),
                        A(ASR),
                        A(MIN),
                        A(MAX),
                        A(AND),
                        A(OR),
                        A(XOR),
                        A(NOT),

                        M(FMUL),
                        M(MUL24),
                };

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                if (reg_allocated[index] == -1) {
                                        fprintf(stderr, "undefined reg use: ");
                                        qir_dump_inst(qinst);
                                        fprintf(stderr, "\n");

                                        src[i] = qpu_rn(0);
                                } else {
                                        src[i] = allocate_to_qpu_reg[reg_allocated[index]];
                                        reg_uses_remaining[index]--;
                                        if (reg_uses_remaining[index] == 0)
                                                reg_in_use[reg_allocated[index]] = false;
                                }
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        }
                }

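                /* Allocate or look up the destination register, constraining
                 * the first-fit scan for ops with fixed hardware
                 * requirements.
                 */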
                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;

                case QFILE_TEMP:
                        if (reg_allocated[qinst->dst.index] == -1) {
                                int alloc;
                                for (alloc = 0;
                                     alloc < ARRAY_SIZE(reg_in_use);
                                     alloc++) {
                                        struct qpu_reg reg = allocate_to_qpu_reg[alloc];

                                        switch (qinst->op) {
                                        case QOP_PACK_SCALED:
                                                /* The pack flags require an
                                                 * A-file register.
                                                 */
                                                if (reg.mux != QPU_MUX_A)
                                                        continue;
                                                break;
                                        case QOP_TEX_RESULT:
                                        case QOP_TLB_COLOR_READ:
                                                /* Only R4-generating
                                                 * instructions get to store
                                                 * values in R4 for now, until
                                                 * we figure out how to do
                                                 * interference.
                                                 */
                                                if (reg.mux != QPU_MUX_R4)
                                                        continue;
                                                break;
                                        case QOP_FRAG_Z:
                                                if (reg.mux != QPU_MUX_B ||
                                                    reg.addr != QPU_R_FRAG_PAYLOAD_ZW) {
                                                        continue;
                                                }
                                                break;
                                        default:
                                                if (reg.mux == QPU_MUX_R4)
                                                        continue;
                                                break;
                                        }

                                        if (!reg_in_use[alloc])
                                                break;
                                }
                                assert(alloc != ARRAY_SIZE(reg_in_use) && "need better reg alloc");
                                reg_in_use[alloc] = true;
                                reg_allocated[qinst->dst.index] = alloc;
                        }

                        dst = allocate_to_qpu_reg[reg_allocated[qinst->dst.index]];

                        reg_uses_remaining[qinst->dst.index]--;
                        if (reg_uses_remaining[qinst->dst.index] == 0) {
                                reg_in_use[reg_allocated[qinst->dst.index]] =
                                        false;
                        }
                        break;

                case QFILE_VARY:
                case QFILE_UNIF:
                        assert(!"not reached");
                        break;
                }

                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op.  Only
                         * accumulator self-moves are detected here, so a
                         * regfile self-move is still emitted, which is
                         * harmless.
                         */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                        }
                        break;

                case QOP_SF:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_NOP), src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

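                /* The SEL ops emit two conditional writes: the value under
                 * the requested condition, then the other value (0 or
                 * src[1]) under its inverse.  XOR-ing the low bit of the op
                 * offset flips ZS<->ZC and NS<->NC.
                 */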
                case QOP_SEL_X_0_ZS:
                case QOP_SEL_X_0_ZC:
                case QOP_SEL_X_0_NS:
                case QOP_SEL_X_0_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_0_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_XOR(dst, qpu_r0(), qpu_r0()));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_0_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;

                case QOP_SEL_X_Y_ZS:
                case QOP_SEL_X_Y_ZC:
                case QOP_SEL_X_Y_NS:
                case QOP_SEL_X_Y_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_Y_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_MOV(dst, src[1]));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_Y_ZS) ^
                                              1) + QPU_COND_ZS);

                        break;

                case QOP_VPM_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_VPM), src[0]));
                        break;

                case QOP_VPM_READ:
                        queue(c, qpu_a_MOV(dst, qpu_ra(QPU_R_VPM)));
                        break;

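                /* SFU ops: writing the operand to the SFU register starts
                 * the lookup; the result lands in r4 two instructions later
                 * (serialize_insts() pads that gap with NOPs as needed).
                 */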
                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        default:
                                abort();
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r4()));

                        break;

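                /* Pack the four sources into the four bytes of r3, one MOV
                 * through the mul unit's 8-bit pack modes per component.
                 */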
                case QOP_PACK_COLORS:
                        for (int i = 0; i < 4; i++) {
                                queue(c, qpu_m_MOV(qpu_r3(), src[i]));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
                                                               QPU_PACK);
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r3()));

                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Z:
                        /* QOP_FRAG_Z doesn't emit instructions, just
                         * allocates the register to the Z payload.
                         */
                        break;

                case QOP_FRAG_RCP_W:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                           qpu_ra(QPU_R_FRAG_PAYLOAD_ZW)));

                        queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

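                /* Set the condition flags from the discard test, so the TLB
                 * Z and color writes below can be made conditional on it.
                 */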
                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_TLB_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
                        break;

                case QOP_PACK_SCALED: {
                        uint64_t a = (qpu_a_MOV(dst, src[0]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16A,
                                                    QPU_PACK));
                        uint64_t b = (qpu_a_MOV(dst, src[1]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16B,
                                                    QPU_PACK));

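                        /* If dst aliases src[1], write the 16B half first so
                         * that src[1] is read before the 16A write clobbers
                         * it.
                         */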
                        if (dst.mux == src[1].mux && dst.addr == src[1].addr) {
                                queue(c, b);
                                queue(c, a);
                        } else {
                                queue(c, a);
                                queue(c, b);
                        }
                        break;
                }

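                /* Texture coordinate writes go to TMU0; the register offset
                 * mirrors the op order (S, T, R, B).
                 */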
                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);

                        break;

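                /* Unpack one byte of r4 via the PM unpack field; the byte
                 * selected (A-D) matches the op's offset from
                 * QOP_R4_UNPACK_A.
                 */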
                case QOP_R4_UNPACK_A:
                case QOP_R4_UNPACK_B:
                case QOP_R4_UNPACK_C:
                case QOP_R4_UNPACK_D:
                        assert(src[0].mux == QPU_MUX_R4);
                        queue(c, qpu_a_MOV(dst, src[0]));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_R4_8A +
                                                       (qinst->op -
                                                        QOP_R4_UNPACK_A),
                                                       QPU_UNPACK);

                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        /* Entries missing from the table are zero-filled;
                         * catch QIR ops with no direct QPU translation.
                         */
                        assert(translate[qinst->op].op != 0);

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, src[0], &src[1]);

                        if (translate[qinst->op].is_mul) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        }
                        break;
                }
        }

        serialize_insts(c);

        /* The per-temp allocation maps are no longer needed. */
        free(reg_allocated);
        free(reg_uses_remaining);

        /* thread end can't have VPM write */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM) {
                serialize_one_inst(c, qpu_NOP());
        }

        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        serialize_one_inst(c, qpu_NOP());
        serialize_one_inst(c, qpu_NOP());

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);
}