/*
 * Copyright (c) 2017 Lima Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
25 #include "util/ralloc.h"
26 #include "util/u_half.h"
27 #include "util/bitscan.h"
31 #include "lima_context.h"
/* Pack a 4-component swizzle into its 2-bits-per-component hardware
 * encoding.  "shift" rotates each source component selector (mod 4) to
 * compensate for a source register that is not vec4-aligned; "dest_shift"
 * moves the whole field up when the destination is not vec4-aligned,
 * placing component i at bit (i + dest_shift) * 2.
 * (Restored the accumulator declaration and return lost in extraction.)
 */
static unsigned encode_swizzle(uint8_t *swizzle, int shift, int dest_shift)
{
   unsigned ret = 0;

   for (int i = 0; i < 4; i++)
      ret |= ((swizzle[i] + shift) & 0x3) << ((i + dest_shift) * 2);

   return ret;
}
41 static int get_scl_reg_index(ppir_src
*src
, int component
)
43 int ret
= ppir_target_get_src_reg_index(src
);
44 ret
+= src
->swizzle
[component
];
48 static void ppir_codegen_encode_varying(ppir_node
*node
, void *code
)
50 ppir_codegen_field_varying
*f
= code
;
51 ppir_load_node
*load
= ppir_node_to_load(node
);
52 ppir_dest
*dest
= &load
->dest
;
53 int index
= ppir_target_get_dest_reg_index(dest
);
54 int num_components
= load
->num_components
;
57 assert(node
->op
== ppir_op_load_varying
||
58 node
->op
== ppir_op_load_coords
||
59 node
->op
== ppir_op_load_fragcoord
||
60 node
->op
== ppir_op_load_pointcoord
||
61 node
->op
== ppir_op_load_frontface
);
63 f
->imm
.dest
= index
>> 2;
64 f
->imm
.mask
= dest
->write_mask
<< (index
& 0x3);
66 int alignment
= num_components
== 3 ? 3 : num_components
- 1;
67 f
->imm
.alignment
= alignment
;
68 f
->imm
.offset_vector
= 0xf;
71 f
->imm
.index
= load
->index
>> 2;
73 f
->imm
.index
= load
->index
>> alignment
;
76 case ppir_op_load_fragcoord
:
77 f
->imm
.source_type
= 2;
78 f
->imm
.perspective
= 3;
80 case ppir_op_load_pointcoord
:
81 f
->imm
.source_type
= 3;
83 case ppir_op_load_frontface
:
84 f
->imm
.source_type
= 3;
85 f
->imm
.perspective
= 1;
92 assert(node
->op
== ppir_op_load_coords
);
94 f
->reg
.dest
= index
>> 2;
95 f
->reg
.mask
= dest
->write_mask
<< (index
& 0x3);
97 f
->reg
.source_type
= 1;
99 ppir_src
*src
= &load
->src
;
100 index
= ppir_target_get_src_reg_index(src
);
101 f
->reg
.source
= index
>> 2;
102 f
->reg
.negate
= src
->negate
;
103 f
->reg
.absolute
= src
->absolute
;
104 f
->reg
.swizzle
= encode_swizzle(src
->swizzle
, index
& 0x3, 0);
108 static void ppir_codegen_encode_texld(ppir_node
*node
, void *code
)
110 ppir_codegen_field_sampler
*f
= code
;
111 ppir_load_texture_node
*ldtex
= ppir_node_to_load_texture(node
);
113 f
->index
= ldtex
->sampler
;
115 f
->type
= ppir_codegen_sampler_type_2d
;
117 f
->unknown_2
= 0x39001;
120 static void ppir_codegen_encode_uniform(ppir_node
*node
, void *code
)
122 ppir_codegen_field_uniform
*f
= code
;
123 ppir_load_node
*load
= ppir_node_to_load(node
);
126 case ppir_op_load_uniform
:
127 f
->source
= ppir_codegen_uniform_src_uniform
;
129 case ppir_op_load_temp
:
130 f
->source
= ppir_codegen_uniform_src_temporary
;
136 int num_components
= load
->num_components
;
137 int alignment
= num_components
== 4 ? 2 : num_components
- 1;
139 f
->alignment
= alignment
;
141 /* TODO: uniform can be also combined like varying */
142 f
->index
= load
->index
<< (2 - alignment
);
/* Map an ALU result shift in [-3, 3] to its opcode encoding; negative
 * shifts wrap into 5..7 (3-bit two's complement). */
static unsigned shift_to_op(int shift)
{
   assert(shift >= -3 && shift <= 3);
   if (shift < 0)
      return shift + 8;
   return shift;
}
151 static void ppir_codegen_encode_vec_mul(ppir_node
*node
, void *code
)
153 ppir_codegen_field_vec4_mul
*f
= code
;
154 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
156 ppir_dest
*dest
= &alu
->dest
;
158 if (dest
->type
!= ppir_target_pipeline
) {
159 int index
= ppir_target_get_dest_reg_index(dest
);
160 dest_shift
= index
& 0x3;
161 f
->dest
= index
>> 2;
162 f
->mask
= dest
->write_mask
<< dest_shift
;
164 f
->dest_modifier
= dest
->modifier
;
168 f
->op
= shift_to_op(alu
->shift
);
171 case ppir_op_store_color
:
172 f
->op
= ppir_codegen_vec4_mul_op_mov
;
175 f
->op
= ppir_codegen_vec4_mul_op_max
;
178 f
->op
= ppir_codegen_vec4_mul_op_min
;
181 f
->op
= ppir_codegen_vec4_mul_op_and
;
184 f
->op
= ppir_codegen_vec4_mul_op_or
;
187 f
->op
= ppir_codegen_vec4_mul_op_xor
;
190 f
->op
= ppir_codegen_vec4_mul_op_gt
;
193 f
->op
= ppir_codegen_vec4_mul_op_ge
;
196 f
->op
= ppir_codegen_vec4_mul_op_eq
;
199 f
->op
= ppir_codegen_vec4_mul_op_ne
;
202 f
->op
= ppir_codegen_vec4_mul_op_not
;
208 ppir_src
*src
= alu
->src
;
209 int index
= ppir_target_get_src_reg_index(src
);
210 f
->arg0_source
= index
>> 2;
211 f
->arg0_swizzle
= encode_swizzle(src
->swizzle
, index
& 0x3, dest_shift
);
212 f
->arg0_absolute
= src
->absolute
;
213 f
->arg0_negate
= src
->negate
;
215 if (alu
->num_src
== 2) {
217 index
= ppir_target_get_src_reg_index(src
);
218 f
->arg1_source
= index
>> 2;
219 f
->arg1_swizzle
= encode_swizzle(src
->swizzle
, index
& 0x3, dest_shift
);
220 f
->arg1_absolute
= src
->absolute
;
221 f
->arg1_negate
= src
->negate
;
225 static void ppir_codegen_encode_scl_mul(ppir_node
*node
, void *code
)
227 ppir_codegen_field_float_mul
*f
= code
;
228 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
230 ppir_dest
*dest
= &alu
->dest
;
231 int dest_component
= ffs(dest
->write_mask
) - 1;
232 assert(dest_component
>= 0);
234 if (dest
->type
!= ppir_target_pipeline
) {
235 f
->dest
= ppir_target_get_dest_reg_index(dest
) + dest_component
;
238 f
->dest_modifier
= dest
->modifier
;
242 f
->op
= shift_to_op(alu
->shift
);
245 f
->op
= ppir_codegen_float_mul_op_mov
;
247 case ppir_op_sel_cond
:
248 f
->op
= ppir_codegen_float_mul_op_mov
;
251 f
->op
= ppir_codegen_float_mul_op_max
;
254 f
->op
= ppir_codegen_float_mul_op_min
;
257 f
->op
= ppir_codegen_float_mul_op_and
;
260 f
->op
= ppir_codegen_float_mul_op_or
;
263 f
->op
= ppir_codegen_float_mul_op_xor
;
266 f
->op
= ppir_codegen_float_mul_op_gt
;
269 f
->op
= ppir_codegen_float_mul_op_ge
;
272 f
->op
= ppir_codegen_float_mul_op_eq
;
275 f
->op
= ppir_codegen_float_mul_op_ne
;
278 f
->op
= ppir_codegen_float_mul_op_not
;
284 ppir_src
*src
= alu
->src
;
285 f
->arg0_source
= get_scl_reg_index(src
, dest_component
);
286 f
->arg0_absolute
= src
->absolute
;
287 f
->arg0_negate
= src
->negate
;
289 if (alu
->num_src
== 2) {
291 f
->arg1_source
= get_scl_reg_index(src
, dest_component
);
292 f
->arg1_absolute
= src
->absolute
;
293 f
->arg1_negate
= src
->negate
;
297 static void ppir_codegen_encode_vec_add(ppir_node
*node
, void *code
)
299 ppir_codegen_field_vec4_acc
*f
= code
;
300 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
302 ppir_dest
*dest
= &alu
->dest
;
303 int index
= ppir_target_get_dest_reg_index(dest
);
304 int dest_shift
= index
& 0x3;
305 f
->dest
= index
>> 2;
306 f
->mask
= dest
->write_mask
<< dest_shift
;
307 f
->dest_modifier
= dest
->modifier
;
311 f
->op
= ppir_codegen_vec4_acc_op_add
;
314 case ppir_op_store_color
:
315 f
->op
= ppir_codegen_vec4_acc_op_mov
;
318 f
->op
= ppir_codegen_vec4_acc_op_sum3
;
322 f
->op
= ppir_codegen_vec4_acc_op_sum4
;
326 f
->op
= ppir_codegen_vec4_acc_op_floor
;
329 f
->op
= ppir_codegen_vec4_acc_op_ceil
;
332 f
->op
= ppir_codegen_vec4_acc_op_fract
;
335 f
->op
= ppir_codegen_vec4_acc_op_gt
;
338 f
->op
= ppir_codegen_vec4_acc_op_ge
;
341 f
->op
= ppir_codegen_vec4_acc_op_eq
;
344 f
->op
= ppir_codegen_vec4_acc_op_ne
;
347 f
->op
= ppir_codegen_vec4_acc_op_sel
;
350 f
->op
= ppir_codegen_vec4_acc_op_max
;
353 f
->op
= ppir_codegen_vec4_acc_op_min
;
356 f
->op
= ppir_codegen_vec4_acc_op_dFdx
;
359 f
->op
= ppir_codegen_vec4_acc_op_dFdy
;
365 ppir_src
*src
= node
->op
== ppir_op_select
? alu
->src
+ 1 : alu
->src
;
366 index
= ppir_target_get_src_reg_index(src
);
368 if (src
->type
== ppir_target_pipeline
&&
369 src
->pipeline
== ppir_pipeline_reg_vmul
)
372 f
->arg0_source
= index
>> 2;
374 f
->arg0_swizzle
= encode_swizzle(src
->swizzle
, index
& 0x3, dest_shift
);
375 f
->arg0_absolute
= src
->absolute
;
376 f
->arg0_negate
= src
->negate
;
378 if (++src
< alu
->src
+ alu
->num_src
) {
379 index
= ppir_target_get_src_reg_index(src
);
380 f
->arg1_source
= index
>> 2;
381 f
->arg1_swizzle
= encode_swizzle(src
->swizzle
, index
& 0x3, dest_shift
);
382 f
->arg1_absolute
= src
->absolute
;
383 f
->arg1_negate
= src
->negate
;
387 static void ppir_codegen_encode_scl_add(ppir_node
*node
, void *code
)
389 ppir_codegen_field_float_acc
*f
= code
;
390 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
392 ppir_dest
*dest
= &alu
->dest
;
393 int dest_component
= ffs(dest
->write_mask
) - 1;
394 assert(dest_component
>= 0);
396 f
->dest
= ppir_target_get_dest_reg_index(dest
) + dest_component
;
398 f
->dest_modifier
= dest
->modifier
;
402 f
->op
= shift_to_op(alu
->shift
);
405 f
->op
= ppir_codegen_float_acc_op_mov
;
408 f
->op
= ppir_codegen_float_acc_op_max
;
411 f
->op
= ppir_codegen_float_acc_op_min
;
414 f
->op
= ppir_codegen_float_acc_op_floor
;
417 f
->op
= ppir_codegen_float_acc_op_ceil
;
420 f
->op
= ppir_codegen_float_acc_op_fract
;
423 f
->op
= ppir_codegen_float_acc_op_gt
;
426 f
->op
= ppir_codegen_float_acc_op_ge
;
429 f
->op
= ppir_codegen_float_acc_op_eq
;
432 f
->op
= ppir_codegen_float_acc_op_ne
;
435 f
->op
= ppir_codegen_float_acc_op_sel
;
438 f
->op
= ppir_codegen_float_acc_op_dFdx
;
441 f
->op
= ppir_codegen_float_acc_op_dFdy
;
447 ppir_src
*src
= node
->op
== ppir_op_select
? alu
->src
+ 1: alu
->src
;
448 if (src
->type
== ppir_target_pipeline
&&
449 src
->pipeline
== ppir_pipeline_reg_fmul
)
452 f
->arg0_source
= get_scl_reg_index(src
, dest_component
);
453 f
->arg0_absolute
= src
->absolute
;
454 f
->arg0_negate
= src
->negate
;
456 if (++src
< alu
->src
+ alu
->num_src
) {
457 f
->arg1_source
= get_scl_reg_index(src
, dest_component
);
458 f
->arg1_absolute
= src
->absolute
;
459 f
->arg1_negate
= src
->negate
;
463 static void ppir_codegen_encode_combine(ppir_node
*node
, void *code
)
465 ppir_codegen_field_combine
*f
= code
;
466 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
477 f
->scalar
.dest_vec
= false;
478 f
->scalar
.arg1_en
= false;
480 ppir_dest
*dest
= &alu
->dest
;
481 int dest_component
= ffs(dest
->write_mask
) - 1;
482 assert(dest_component
>= 0);
483 f
->scalar
.dest
= ppir_target_get_dest_reg_index(dest
) + dest_component
;
484 f
->scalar
.dest_modifier
= dest
->modifier
;
486 ppir_src
*src
= alu
->src
;
487 f
->scalar
.arg0_src
= get_scl_reg_index(src
, dest_component
);
488 f
->scalar
.arg0_absolute
= src
->absolute
;
489 f
->scalar
.arg0_negate
= src
->negate
;
493 f
->scalar
.op
= ppir_codegen_combine_scalar_op_rsqrt
;
496 f
->scalar
.op
= ppir_codegen_combine_scalar_op_log2
;
499 f
->scalar
.op
= ppir_codegen_combine_scalar_op_exp2
;
502 f
->scalar
.op
= ppir_codegen_combine_scalar_op_rcp
;
505 f
->scalar
.op
= ppir_codegen_combine_scalar_op_sqrt
;
508 f
->scalar
.op
= ppir_codegen_combine_scalar_op_sin
;
511 f
->scalar
.op
= ppir_codegen_combine_scalar_op_cos
;
522 static void ppir_codegen_encode_store_temp(ppir_node
*node
, void *code
)
524 assert(node
->op
== ppir_op_store_temp
);
526 ppir_codegen_field_temp_write
*f
= code
;
527 ppir_store_node
*snode
= ppir_node_to_store(node
);
528 int num_components
= snode
->num_components
;
530 f
->temp_write
.dest
= 0x03; // 11 - temporary
531 f
->temp_write
.source
= snode
->src
.reg
->index
;
533 int alignment
= num_components
== 4 ? 2 : num_components
- 1;
534 f
->temp_write
.alignment
= alignment
;
535 f
->temp_write
.index
= snode
->index
<< (2 - alignment
);
537 f
->temp_write
.offset_reg
= snode
->index
>> 2;
540 static void ppir_codegen_encode_const(ppir_const
*constant
, uint16_t *code
)
542 for (int i
= 0; i
< constant
->num
; i
++)
543 code
[i
] = util_float_to_half(constant
->value
[i
].f
);
546 static void ppir_codegen_encode_discard(ppir_node
*node
, void *code
)
548 ppir_codegen_field_branch
*b
= code
;
549 assert(node
->op
== ppir_op_discard
);
551 b
->discard
.word0
= PPIR_CODEGEN_DISCARD_WORD0
;
552 b
->discard
.word1
= PPIR_CODEGEN_DISCARD_WORD1
;
553 b
->discard
.word2
= PPIR_CODEGEN_DISCARD_WORD2
;
556 static void ppir_codegen_encode_branch(ppir_node
*node
, void *code
)
558 ppir_codegen_field_branch
*b
= code
;
559 ppir_branch_node
*branch
;
560 ppir_instr
*target_instr
;
561 if (node
->op
== ppir_op_discard
) {
562 ppir_codegen_encode_discard(node
, code
);
566 assert(node
->op
== ppir_op_branch
);
567 branch
= ppir_node_to_branch(node
);
569 b
->branch
.unknown_0
= 0x0;
570 b
->branch
.unknown_1
= 0x0;
572 if (branch
->num_src
== 2) {
573 b
->branch
.arg0_source
= get_scl_reg_index(&branch
->src
[0], 0);
574 b
->branch
.arg1_source
= get_scl_reg_index(&branch
->src
[1], 0);
575 b
->branch
.cond_gt
= branch
->cond_gt
;
576 b
->branch
.cond_eq
= branch
->cond_eq
;
577 b
->branch
.cond_lt
= branch
->cond_lt
;
578 } else if (branch
->num_src
== 0) {
579 /* Unconditional branch */
580 b
->branch
.arg0_source
= 0;
581 b
->branch
.arg1_source
= 0;
582 b
->branch
.cond_gt
= true;
583 b
->branch
.cond_eq
= true;
584 b
->branch
.cond_lt
= true;
589 target_instr
= list_first_entry(&branch
->target
->instr_list
, ppir_instr
, list
);
590 b
->branch
.target
= target_instr
->offset
- node
->instr
->offset
;
591 b
->branch
.next_count
= target_instr
->encode_size
;
594 typedef void (*ppir_codegen_instr_slot_encode_func
)(ppir_node
*, void *);
596 static const ppir_codegen_instr_slot_encode_func
597 ppir_codegen_encode_slot
[PPIR_INSTR_SLOT_NUM
] = {
598 [PPIR_INSTR_SLOT_VARYING
] = ppir_codegen_encode_varying
,
599 [PPIR_INSTR_SLOT_TEXLD
] = ppir_codegen_encode_texld
,
600 [PPIR_INSTR_SLOT_UNIFORM
] = ppir_codegen_encode_uniform
,
601 [PPIR_INSTR_SLOT_ALU_VEC_MUL
] = ppir_codegen_encode_vec_mul
,
602 [PPIR_INSTR_SLOT_ALU_SCL_MUL
] = ppir_codegen_encode_scl_mul
,
603 [PPIR_INSTR_SLOT_ALU_VEC_ADD
] = ppir_codegen_encode_vec_add
,
604 [PPIR_INSTR_SLOT_ALU_SCL_ADD
] = ppir_codegen_encode_scl_add
,
605 [PPIR_INSTR_SLOT_ALU_COMBINE
] = ppir_codegen_encode_combine
,
606 [PPIR_INSTR_SLOT_STORE_TEMP
] = ppir_codegen_encode_store_temp
,
607 [PPIR_INSTR_SLOT_BRANCH
] = ppir_codegen_encode_branch
,
/* Bit width of each slot's encoding, in the same order as the slot enum.
 * (Restored the closing brace lost in extraction.) */
static const int ppir_codegen_field_size[] = {
   34, 62, 41, 43, 30, 44, 31, 30, 41, 73,
};
/* Round a bit count up to whole 32-bit words. */
static inline int align_to_word(int size)
{
   int padded = size + 0x1f;
   return padded >> 5;
}
619 static int get_instr_encode_size(ppir_instr
*instr
)
623 for (int i
= 0; i
< PPIR_INSTR_SLOT_NUM
; i
++) {
625 size
+= ppir_codegen_field_size
[i
];
628 for (int i
= 0; i
< 2; i
++) {
629 if (instr
->constant
[i
].num
)
633 return align_to_word(size
) + 1;
/* Copy src_size bits from src into dst starting at bit dst_offset.
 * When the destination offset is word-aligned this is a plain memcpy;
 * otherwise each source word is ORed in as two shifted halves.  dst is
 * assumed to be zero-initialized in the overlap region (we only OR).
 * (Restored the loop skeleton — while, pointer increments, accumulation
 * and break conditions — lost in extraction; reconstructed to match the
 * surviving statements and original line-number gaps.)
 */
static void bitcopy(void *dst, int dst_offset, void *src, int src_size)
{
   int off1 = dst_offset & 0x1f;
   uint32_t *cpy_dst = dst, *cpy_src = src;

   cpy_dst += (dst_offset >> 5);

   if (off1) {
      int off2 = 32 - off1;
      int cpy_size = 0;
      while (true) {
         /* low part of the source word into the high bits of dst */
         *cpy_dst |= *cpy_src << off1;
         cpy_dst++;

         cpy_size += off2;
         if (cpy_size >= src_size)
            break;

         /* high part of the source word into the low bits of next dst */
         *cpy_dst |= *cpy_src >> off2;
         cpy_src++;

         cpy_size += off1;
         if (cpy_size >= src_size)
            break;
      }
   }
   else
      memcpy(cpy_dst, cpy_src, align_to_word(src_size) * 4);
}
666 static int encode_instr(ppir_instr
*instr
, void *code
, void *last_code
)
669 ppir_codegen_ctrl
*ctrl
= code
;
671 for (int i
= 0; i
< PPIR_INSTR_SLOT_NUM
; i
++) {
672 if (instr
->slots
[i
]) {
673 /* max field size (73), align to dword */
674 uint8_t output
[12] = {0};
676 ppir_codegen_encode_slot
[i
](instr
->slots
[i
], output
);
677 bitcopy(ctrl
+ 1, size
, output
, ppir_codegen_field_size
[i
]);
679 size
+= ppir_codegen_field_size
[i
];
680 ctrl
->fields
|= 1 << i
;
684 if (instr
->slots
[PPIR_INSTR_SLOT_TEXLD
])
687 if (instr
->slots
[PPIR_INSTR_SLOT_ALU_VEC_ADD
]) {
688 ppir_node
*node
= instr
->slots
[PPIR_INSTR_SLOT_ALU_VEC_ADD
];
689 if (node
->op
== ppir_op_ddx
|| node
->op
== ppir_op_ddy
)
693 if (instr
->slots
[PPIR_INSTR_SLOT_ALU_SCL_ADD
]) {
694 ppir_node
*node
= instr
->slots
[PPIR_INSTR_SLOT_ALU_SCL_ADD
];
695 if (node
->op
== ppir_op_ddx
|| node
->op
== ppir_op_ddy
)
699 for (int i
= 0; i
< 2; i
++) {
700 if (instr
->constant
[i
].num
) {
701 uint16_t output
[4] = {0};
703 ppir_codegen_encode_const(instr
->constant
+ i
, output
);
704 bitcopy(ctrl
+ 1, size
, output
, instr
->constant
[i
].num
* 16);
707 ctrl
->fields
|= 1 << (ppir_codegen_field_shift_vec4_const_0
+ i
);
711 size
= align_to_word(size
) + 1;
718 ppir_codegen_ctrl
*last_ctrl
= last_code
;
719 last_ctrl
->next_count
= size
;
720 last_ctrl
->prefetch
= true;
726 static void ppir_codegen_print_prog(ppir_compiler
*comp
)
728 uint32_t *prog
= comp
->prog
->shader
;
731 printf("========ppir codegen========\n");
732 list_for_each_entry(ppir_block
, block
, &comp
->block_list
, list
) {
733 list_for_each_entry(ppir_instr
, instr
, &block
->instr_list
, list
) {
734 printf("%03d (@%6d): ", instr
->index
, instr
->offset
);
735 int n
= prog
[0] & 0x1f;
736 for (int i
= 0; i
< n
; i
++) {
739 printf("%08x ", prog
[i
]);
742 ppir_disassemble_instr(prog
, offset
);
747 printf("-----------------------\n");
750 bool ppir_codegen_prog(ppir_compiler
*comp
)
753 list_for_each_entry(ppir_block
, block
, &comp
->block_list
, list
) {
754 list_for_each_entry(ppir_instr
, instr
, &block
->instr_list
, list
) {
755 instr
->offset
= size
;
756 instr
->encode_size
= get_instr_encode_size(instr
);
757 size
+= instr
->encode_size
;
761 uint32_t *prog
= rzalloc_size(comp
->prog
, size
* sizeof(uint32_t));
765 uint32_t *code
= prog
, *last_code
= NULL
;
766 list_for_each_entry(ppir_block
, block
, &comp
->block_list
, list
) {
767 list_for_each_entry(ppir_instr
, instr
, &block
->instr_list
, list
) {
768 int offset
= encode_instr(instr
, code
, last_code
);
774 comp
->prog
->shader
= prog
;
775 comp
->prog
->shader_size
= size
* sizeof(uint32_t);
777 if (lima_debug
& LIMA_DEBUG_PP
)
778 ppir_codegen_print_prog(comp
);