/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file vc4_qpu_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies, and make a list of the DAG heads. Heuristically
 * pick a DAG head, then put all the children that are now DAG heads into the
 * list of things to schedule.
 *
 * The goal of scheduling here is to pack pairs of operations together in a
 * single QPU instruction.
 */

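/* (A QPU instruction encodes one op for the add ALU and one for the mul
 * ALU, so "pairing" here means merging two compatible single-op
 * instructions, via qpu_merge_inst(), into one dual-issued instruction.)
 */
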
#include "vc4_qir.h"
#include "vc4_qpu.h"
#include "util/ralloc.h"

static bool debug;

struct schedule_node_child;

struct schedule_node {
        struct list_head link;
        struct queued_qpu_inst *inst;
        struct schedule_node_child *children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /**
         * Minimum number of cycles from scheduling this instruction until the
         * end of the program, based on the slowest dependency chain through
         * the children.
         */
        uint32_t delay;

        /**
         * cycles between this instruction being scheduled and when its result
         * can be consumed.
         */
        uint32_t latency;

        /**
         * Which uniform from uniform_data[] this instruction read, or -1 if
         * not reading a uniform.
         */
        int uniform;
};

struct schedule_node_child {
        struct schedule_node *node;
        bool write_after_read;
};

/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };

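/* The forward (F) pass orders each access after the writes it depends on,
 * while the reverse (R) pass picks up the remaining read-then-write
 * orderings as write_after_read edges (see add_dep() below).
 */
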
struct schedule_state {
        struct schedule_node *last_r[6];
        struct schedule_node *last_ra[32];
        struct schedule_node *last_rb[32];
        struct schedule_node *last_sf;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_tmu_write;
        struct schedule_node *last_tlb;
        struct schedule_node *last_vpm;
        enum direction dir;
};

static void
add_dep(struct schedule_state *state,
        struct schedule_node *before,
        struct schedule_node *after,
        bool write)
{
        bool write_after_read = !write && state->dir == R;

        if (!before || !after)
                return;

        assert(before != after);

        if (state->dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        for (int i = 0; i < before->child_count; i++) {
                if (before->children[i].node == after &&
                    (before->children[i].write_after_read == write_after_read)) {
                        return;
                }
        }

        if (before->child_array_size <= before->child_count) {
                before->child_array_size = MAX2(before->child_array_size * 2, 16);
                before->children = reralloc(before, before->children,
                                            struct schedule_node_child,
                                            before->child_array_size);
        }

        before->children[before->child_count].node = after;
        before->children[before->child_count].write_after_read =
                write_after_read;
        before->child_count++;
        after->parent_count++;
}

static void
add_read_dep(struct schedule_state *state,
             struct schedule_node *before,
             struct schedule_node *after)
{
        add_dep(state, before, after, false);
}

static void
add_write_dep(struct schedule_state *state,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(state, *before, after, true);
        *before = after;
}

static bool
qpu_writes_r4(uint64_t inst)
{
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        switch (sig) {
        case QPU_SIG_COLOR_LOAD:
        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
        case QPU_SIG_ALPHA_MASK_LOAD:
                return true;
        default:
                return false;
        }
}

static void
process_raddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t raddr, bool is_a)
{
        switch (raddr) {
        case QPU_R_VARY:
                add_write_dep(state, &state->last_r[5], n);
                break;

        case QPU_R_VPM:
                add_write_dep(state, &state->last_vpm_read, n);
                break;

        case QPU_R_UNIF:
        case QPU_R_NOP:
        case QPU_R_ELEM_QPU:
        case QPU_R_XY_PIXEL_COORD:
        case QPU_R_MS_REV_FLAGS:
                break;

        default:
                if (raddr < 32) {
                        if (is_a)
                                add_read_dep(state, state->last_ra[raddr], n);
                        else
                                add_read_dep(state, state->last_rb[raddr], n);
                } else {
                        fprintf(stderr, "unknown raddr %d\n", raddr);
                        abort();
                }
                break;
        }
}

static bool
is_tmu_write(uint32_t waddr)
{
        return (waddr >= QPU_W_TMU0_S &&
                waddr <= QPU_W_TMU1_B);
}

static bool
reads_uniform(uint64_t inst)
{
        if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_LOAD_IMM)
                return false;

        return (QPU_GET_FIELD(inst, QPU_RADDR_A) == QPU_R_UNIF ||
                (QPU_GET_FIELD(inst, QPU_RADDR_B) == QPU_R_UNIF &&
                 QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_SMALL_IMM) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_ADD)) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_MUL)));
}

static void
process_mux_deps(struct schedule_state *state, struct schedule_node *n,
                 uint32_t mux)
{
        if (mux != QPU_MUX_A && mux != QPU_MUX_B)
                add_read_dep(state, state->last_r[mux], n);
}

static void
process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t waddr, bool is_add)
{
        uint64_t inst = n->inst->inst;
        bool is_a = is_add ^ ((inst & QPU_WS) != 0);

        if (waddr < 32) {
                if (is_a) {
                        add_write_dep(state, &state->last_ra[waddr], n);
                } else {
                        add_write_dep(state, &state->last_rb[waddr], n);
                }
        } else if (is_tmu_write(waddr)) {
                add_write_dep(state, &state->last_tmu_write, n);
        } else if (qpu_waddr_is_tlb(waddr)) {
                add_write_dep(state, &state->last_tlb, n);
        } else {
                switch (waddr) {
                case QPU_W_ACC0:
                case QPU_W_ACC1:
                case QPU_W_ACC2:
                case QPU_W_ACC3:
                case QPU_W_ACC5:
                        add_write_dep(state, &state->last_r[waddr - QPU_W_ACC0],
                                      n);
                        break;

                case QPU_W_VPM:
                        add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_VPMVCD_SETUP:
                        if (is_a)
                                add_write_dep(state, &state->last_vpm_read, n);
                        else
                                add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_SFU_RECIP:
                case QPU_W_SFU_RECIPSQRT:
                case QPU_W_SFU_EXP:
                case QPU_W_SFU_LOG:
                        add_write_dep(state, &state->last_r[4], n);
                        break;

                case QPU_W_TLB_STENCIL_SETUP:
                        /* This isn't a TLB operation that does things like
                         * implicitly lock the scoreboard, but it does have to
                         * appear before TLB_Z, and each of the TLB_STENCILs
                         * have to schedule in the same order relative to each
                         * other.
                         */
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case QPU_W_NOP:
                        break;

                default:
                        fprintf(stderr, "Unknown waddr %d\n", waddr);
                        abort();
                }
        }
}

static void
process_cond_deps(struct schedule_state *state, struct schedule_node *n,
                  uint32_t cond)
{
        switch (cond) {
        case QPU_COND_NEVER:
        case QPU_COND_ALWAYS:
                break;
        default:
                add_read_dep(state, state->last_sf, n);
                break;
        }
}

/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all reads of r4 have to happen between the r4
 * writes that surround them".
 */
static void
calculate_deps(struct schedule_state *state, struct schedule_node *n)
{
        uint64_t inst = n->inst->inst;
        uint32_t add_op = QPU_GET_FIELD(inst, QPU_OP_ADD);
        uint32_t mul_op = QPU_GET_FIELD(inst, QPU_OP_MUL);
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
        uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
        uint32_t mul_a = QPU_GET_FIELD(inst, QPU_MUL_A);
        uint32_t mul_b = QPU_GET_FIELD(inst, QPU_MUL_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        if (sig != QPU_SIG_LOAD_IMM) {
                process_raddr_deps(state, n, raddr_a, true);
                if (sig != QPU_SIG_SMALL_IMM)
                        process_raddr_deps(state, n, raddr_b, false);
        }

        if (add_op != QPU_A_NOP) {
                process_mux_deps(state, n, add_a);
                process_mux_deps(state, n, add_b);
        }
        if (mul_op != QPU_M_NOP) {
                process_mux_deps(state, n, mul_a);
                process_mux_deps(state, n, mul_b);
        }

        process_waddr_deps(state, n, waddr_add, true);
        process_waddr_deps(state, n, waddr_mul, false);
        if (qpu_writes_r4(inst))
                add_write_dep(state, &state->last_r[4], n);

        switch (sig) {
        case QPU_SIG_SW_BREAKPOINT:
        case QPU_SIG_NONE:
        case QPU_SIG_THREAD_SWITCH:
        case QPU_SIG_LAST_THREAD_SWITCH:
        case QPU_SIG_SMALL_IMM:
        case QPU_SIG_LOAD_IMM:
                break;

        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
                /* TMU loads are coming from a FIFO, so ordering is important.
                 */
                add_write_dep(state, &state->last_tmu_write, n);
                break;

        case QPU_SIG_COLOR_LOAD:
                add_read_dep(state, state->last_tlb, n);
                break;

        case QPU_SIG_PROG_END:
        case QPU_SIG_WAIT_FOR_SCOREBOARD:
        case QPU_SIG_SCOREBOARD_UNLOCK:
        case QPU_SIG_COVERAGE_LOAD:
        case QPU_SIG_COLOR_LOAD_END:
        case QPU_SIG_ALPHA_MASK_LOAD:
        default:
                fprintf(stderr, "Unhandled signal bits %d\n", sig);
                abort();
        }

        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_ADD));
        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_MUL));
        if (inst & QPU_SF)
                add_write_dep(state, &state->last_sf, n);
}

static void
calculate_forward_deps(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dir = F;

        list_for_each_entry(struct schedule_node, node, schedule_list, link)
                calculate_deps(&state, node);
}

static void
calculate_reverse_deps(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct list_head *node;
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dir = R;

        for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
                calculate_deps(&state, (struct schedule_node *)node);
        }
}

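/* State for choose_instruction_to_schedule(): last_waddr_a/b track what the
 * previous instruction wrote to the physical register files (for the "must
 * not read what the previous instruction wrote" rule), and
 * last_sfu_write_tick keeps r4 reads the required distance from SFU writes.
 */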
struct choose_scoreboard {
        int tick;
        int last_sfu_write_tick;
        uint32_t last_waddr_a, last_waddr_b;
};

static bool
reads_too_soon_after_write(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
        uint32_t src_muxes[] = {
                QPU_GET_FIELD(inst, QPU_ADD_A),
                QPU_GET_FIELD(inst, QPU_ADD_B),
                QPU_GET_FIELD(inst, QPU_MUL_A),
                QPU_GET_FIELD(inst, QPU_MUL_B),
        };
        for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                if ((src_muxes[i] == QPU_MUX_A &&
                     raddr_a < 32 &&
                     scoreboard->last_waddr_a == raddr_a) ||
                    (src_muxes[i] == QPU_MUX_B &&
                     sig != QPU_SIG_SMALL_IMM &&
                     raddr_b < 32 &&
                     scoreboard->last_waddr_b == raddr_b)) {
                        return true;
                }

                if (src_muxes[i] == QPU_MUX_R4) {
                        if (scoreboard->tick -
                            scoreboard->last_sfu_write_tick <= 2) {
                                return true;
                        }
                }
        }

        return false;
}

static bool
pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        return (scoreboard->tick < 2 && qpu_inst_is_tlb(inst));
}

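/* Higher scores schedule sooner: TMU setup writes go as early as possible,
 * generic ALU work next, TMU result loads late, and TLB accesses last.
 */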
static int
get_instruction_priority(uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
        uint32_t baseline_score;
        uint32_t next_score = 0;

        /* Schedule TLB operations as late as possible, to get more
         * parallelism between shaders.
         */
        if (qpu_inst_is_tlb(inst))
                return next_score;
        next_score++;

        /* Schedule texture read results collection late to hide latency. */
        if (sig == QPU_SIG_LOAD_TMU0 || sig == QPU_SIG_LOAD_TMU1)
                return next_score;
        next_score++;

        /* Default score for things that aren't otherwise special. */
        baseline_score = next_score;
        next_score++;

        /* Schedule texture read setup early to hide their latency better. */
        if (is_tmu_write(waddr_add) || is_tmu_write(waddr_mul))
                return next_score;
        next_score++;

        return baseline_score;
}

static struct schedule_node *
choose_instruction_to_schedule(struct choose_scoreboard *scoreboard,
                               struct list_head *schedule_list,
                               struct schedule_node *prev_inst)
{
        struct schedule_node *chosen = NULL;
        int chosen_prio = 0;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                uint64_t inst = n->inst->inst;

                /* "An instruction must not read from a location in physical
                 *  regfile A or B that was written to by the previous
                 *  instruction."
                 */
                if (reads_too_soon_after_write(scoreboard, inst))
                        continue;

                /* "A scoreboard wait must not occur in the first two
                 *  instructions of a fragment shader. This is either the
                 *  explicit Wait for Scoreboard signal or an implicit wait
                 *  with the first tile-buffer read or write instruction."
                 */
                if (pixel_scoreboard_too_soon(scoreboard, inst))
                        continue;

                /* If we're trying to pair with another instruction, check
                 * that they're compatible.
                 */
                if (prev_inst) {
                        if (prev_inst->uniform != -1 && n->uniform != -1)
                                continue;

                        inst = qpu_merge_inst(prev_inst->inst->inst, inst);
                        if (!inst)
                                continue;
                }

                int prio = get_instruction_priority(inst);

                /* Found a valid instruction. If nothing better comes along,
                 * this one works.
                 */
                if (!chosen) {
                        chosen = n;
                        chosen_prio = prio;
                        continue;
                }

                if (prio > chosen_prio) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (prio < chosen_prio) {
                        continue;
                }

                if (n->delay > chosen->delay) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}

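/* Note that QPU_WS swaps which physical register file the add and mul
 * results land in, which is why the last_waddr_a/b updates below are
 * mirrored.
 */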
static void
update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
                             uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);

        if (!(inst & QPU_WS)) {
                scoreboard->last_waddr_a = waddr_add;
                scoreboard->last_waddr_b = waddr_mul;
        } else {
                scoreboard->last_waddr_b = waddr_add;
                scoreboard->last_waddr_a = waddr_mul;
        }

        if ((waddr_add >= QPU_W_SFU_RECIP && waddr_add <= QPU_W_SFU_LOG) ||
            (waddr_mul >= QPU_W_SFU_RECIP && waddr_mul <= QPU_W_SFU_LOG)) {
                scoreboard->last_sfu_write_tick = scoreboard->tick;
        }
}

static void
dump_state(struct list_head *schedule_list)
{
        uint32_t i = 0;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                fprintf(stderr, "%3d: ", i++);
                vc4_qpu_disasm(&n->inst->inst, 1);
                fprintf(stderr, "\n");

                for (int i = 0; i < n->child_count; i++) {
                        struct schedule_node *child = n->children[i].node;
                        if (!child)
                                continue;

                        fprintf(stderr, "   - ");
                        vc4_qpu_disasm(&child->inst->inst, 1);
                        fprintf(stderr, " (%d parents, %c)\n",
                                child->parent_count,
                                n->children[i].write_after_read ? 'w' : 'r');
                }
        }
}

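/* delay is the critical-path length in cycles from this node to the end of
 * the program; choose_instruction_to_schedule() breaks priority ties in
 * favor of larger delays so long dependency chains get started early.
 */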
/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct schedule_node *n)
{
        if (!n->child_count) {
                n->delay = 1;
        } else {
                for (int i = 0; i < n->child_count; i++) {
                        if (!n->children[i].node->delay)
                                compute_delay(n->children[i].node);
                        n->delay = MAX2(n->delay,
                                        n->children[i].node->delay + n->latency);
                }
        }
}

static void
mark_instruction_scheduled(struct list_head *schedule_list,
                           struct schedule_node *node,
                           bool war_only)
{
        if (!node)
                return;

        for (int i = node->child_count - 1; i >= 0; i--) {
                struct schedule_node *child =
                        node->children[i].node;

                if (!child)
                        continue;

                if (war_only && !node->children[i].write_after_read)
                        continue;

                child->parent_count--;
                if (child->parent_count == 0)
                        list_add(&child->link, schedule_list);

                node->children[i].node = NULL;
        }
}

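/* The main scheduling loop: repeatedly pick a DAG head, try to pair a
 * second instruction with it, emit the (possibly merged) instruction, and
 * promote any children that become new DAG heads.
 */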
static void
schedule_instructions(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct choose_scoreboard scoreboard;

        /* We reorder the uniforms as we schedule instructions, so save the
         * old data off and replace it.
         */
        uint32_t *uniform_data = c->uniform_data;
        enum quniform_contents *uniform_contents = c->uniform_contents;
        c->uniform_contents = ralloc_array(c, enum quniform_contents,
                                           c->num_uniforms);
        c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
        c->uniform_array_size = c->num_uniforms;
        uint32_t next_uniform = 0;

        memset(&scoreboard, 0, sizeof(scoreboard));
        scoreboard.last_waddr_a = ~0;
        scoreboard.last_waddr_b = ~0;
        scoreboard.last_sfu_write_tick = -10;

        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(schedule_list);
                fprintf(stderr, "\n");
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }

        while (!list_empty(schedule_list)) {
                struct schedule_node *chosen =
                        choose_instruction_to_schedule(&scoreboard,
                                                       schedule_list,
                                                       NULL);
                struct schedule_node *merge = NULL;

                /* If there are no valid instructions to schedule, drop a NOP
                 * in.
                 */
                uint64_t inst = chosen ? chosen->inst->inst : qpu_NOP();

                if (debug) {
                        fprintf(stderr, "current list:\n");
                        dump_state(schedule_list);
                        fprintf(stderr, "chose: ");
                        vc4_qpu_disasm(&inst, 1);
                        fprintf(stderr, "\n");
                }

                /* Schedule this instruction onto the QPU list. Also try to
                 * find an instruction to pair with it.
                 */
                if (chosen) {
                        list_del(&chosen->link);
                        mark_instruction_scheduled(schedule_list, chosen, true);
                        if (chosen->uniform != -1) {
                                c->uniform_data[next_uniform] =
                                        uniform_data[chosen->uniform];
                                c->uniform_contents[next_uniform] =
                                        uniform_contents[chosen->uniform];
                                next_uniform++;
                        }

                        merge = choose_instruction_to_schedule(&scoreboard,
                                                               schedule_list,
                                                               chosen);
                        if (merge) {
                                list_del(&merge->link);
                                inst = qpu_merge_inst(inst, merge->inst->inst);
                                assert(inst != 0);
                                if (merge->uniform != -1) {
                                        c->uniform_data[next_uniform] =
                                                uniform_data[merge->uniform];
                                        c->uniform_contents[next_uniform] =
                                                uniform_contents[merge->uniform];
                                        next_uniform++;
                                }

                                if (debug) {
                                        fprintf(stderr, "merging: ");
                                        vc4_qpu_disasm(&merge->inst->inst, 1);
                                        fprintf(stderr, "\n");
                                        fprintf(stderr, "resulting in: ");
                                        vc4_qpu_disasm(&inst, 1);
                                        fprintf(stderr, "\n");
                                }
                        }
                }

                if (debug) {
                        fprintf(stderr, "\n");
                }

                qpu_serialize_one_inst(c, inst);

                update_scoreboard_for_chosen(&scoreboard, inst);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled. Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                mark_instruction_scheduled(schedule_list, chosen, false);
                mark_instruction_scheduled(schedule_list, merge, false);

                scoreboard.tick++;
        }

        assert(next_uniform == c->num_uniforms);
}

static uint32_t waddr_latency(uint32_t waddr)
{
        if (waddr < 32)
                return 2;

        /* Some huge number, really. */
        if (waddr >= QPU_W_TMU0_S && waddr <= QPU_W_TMU1_B)
                return 100;

        switch (waddr) {
        case QPU_W_SFU_RECIP:
        case QPU_W_SFU_RECIPSQRT:
        case QPU_W_SFU_EXP:
        case QPU_W_SFU_LOG:
                return 3;

        default:
                return 1;
        }
}

static uint32_t
instruction_latency(uint64_t inst)
{
        return MAX2(waddr_latency(QPU_GET_FIELD(inst, QPU_WADDR_ADD)),
                    waddr_latency(QPU_GET_FIELD(inst, QPU_WADDR_MUL)));
}

void
qpu_schedule_instructions(struct vc4_compile *c)
{
        void *mem_ctx = ralloc_context(NULL);
        struct list_head schedule_list;

        list_inithead(&schedule_list);

        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                list_for_each_entry(struct queued_qpu_inst, q,
                                    &c->qpu_inst_list, link) {
                        vc4_qpu_disasm(&q->inst, 1);
                        fprintf(stderr, "\n");
                }
                fprintf(stderr, "\n");
        }

        /* Wrap each instruction in a scheduler structure. */
        uint32_t next_uniform = 0;
        while (!list_empty(&c->qpu_inst_list)) {
                struct queued_qpu_inst *inst =
                        (struct queued_qpu_inst *)c->qpu_inst_list.next;
                struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);

                n->inst = inst;
                n->latency = instruction_latency(inst->inst);

                if (reads_uniform(inst->inst)) {
                        n->uniform = next_uniform++;
                } else {
                        n->uniform = -1;
                }
                list_del(&inst->link);
                list_addtail(&n->link, &schedule_list);
        }
        assert(next_uniform == c->num_uniforms);

        calculate_forward_deps(c, &schedule_list);
        calculate_reverse_deps(c, &schedule_list);

        list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
                compute_delay(n);
        }

        schedule_instructions(c, &schedule_list);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                vc4_qpu_disasm(c->qpu_insts, c->qpu_inst_count);
                fprintf(stderr, "\n");
        }

        ralloc_free(mem_ctx);
}