/*
 * Copyright © 2017 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir_serialize.h"
#include "nir_control_flow.h"
#include "util/u_dynarray.h"
#include "util/u_math.h"

#define NIR_SERIALIZE_FUNC_HAS_IMPL ((void *)(intptr_t)1)
#define MAX_OBJECT_IDS (1 << 20)
typedef struct {
   size_t blob_offset;
   nir_ssa_def *src;
   nir_block *block;
} write_phi_fixup;

typedef struct {
   const nir_shader *nir;

   struct blob *blob;

   /* maps pointer to index */
   struct hash_table *remap_table;

   /* the next index to assign to a NIR in-memory object */
   uint32_t next_idx;

   /* Array of write_phi_fixup structs representing phi sources that need to
    * be resolved in the second pass.
    */
   struct util_dynarray phi_fixups;

   /* The last serialized type. */
   const struct glsl_type *last_type;
   const struct glsl_type *last_interface_type;
   struct nir_variable_data last_var_data;

   /* Don't write optional data such as variable names. */
   bool strip;
} write_ctx;

typedef struct {
   nir_shader *nir;

   struct blob_reader *blob;

   /* the next index to assign to a NIR in-memory object */
   uint32_t next_idx;

   /* The length of the index -> object table */
   uint32_t idx_table_len;

   /* map from index to deserialized pointer */
   void **idx_table;

   /* List of phi sources. */
   struct list_head phi_srcs;

   /* The last deserialized type. */
   const struct glsl_type *last_type;
   const struct glsl_type *last_interface_type;
   struct nir_variable_data last_var_data;
} read_ctx;
static void
write_add_object(write_ctx *ctx, const void *obj)
{
   uint32_t index = ctx->next_idx++;
   assert(index != MAX_OBJECT_IDS);
   _mesa_hash_table_insert(ctx->remap_table, obj, (void *)(uintptr_t) index);
}

static uint32_t
write_lookup_object(write_ctx *ctx, const void *obj)
{
   struct hash_entry *entry = _mesa_hash_table_search(ctx->remap_table, obj);
   assert(entry);
   return (uint32_t)(uintptr_t) entry->data;
}

static void
write_object(write_ctx *ctx, const void *obj)
{
   blob_write_uint32(ctx->blob, write_lookup_object(ctx, obj));
}

static void
read_add_object(read_ctx *ctx, void *obj)
{
   assert(ctx->next_idx < ctx->idx_table_len);
   ctx->idx_table[ctx->next_idx++] = obj;
}

static void *
read_lookup_object(read_ctx *ctx, uint32_t idx)
{
   assert(idx < ctx->idx_table_len);
   return ctx->idx_table[idx];
}

static void *
read_object(read_ctx *ctx)
{
   return read_lookup_object(ctx, blob_read_uint32(ctx->blob));
}
static uint8_t
encode_bit_size_3bits(uint8_t bit_size)
{
   /* Encode values of 0, 1, 2, 4, 8, 16, 32, 64 in 3 bits. */
   assert(bit_size <= 64 && util_is_power_of_two_or_zero(bit_size));
   if (bit_size)
      return util_logbase2(bit_size) + 1;
   return 0;
}

static uint8_t
decode_bit_size_3bits(uint8_t bit_size)
{
   if (bit_size)
      return 1 << (bit_size - 1);
   return 0;
}

static uint8_t
encode_num_components_in_3bits(uint8_t num_components)
{
   if (num_components <= 4)
      return num_components;
   if (num_components == 8)
      return 5;
   if (num_components == 16)
      return 6;

   unreachable("invalid number in num_components");
   return 0;
}

static uint8_t
decode_num_components_in_3bits(uint8_t value)
{
   if (value <= 4)
      return value;
   if (value == 5)
      return 8;
   if (value == 6)
      return 16;

   unreachable("invalid num_components encoding");
   return 0;
}
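
/* Worked example for the two 3-bit encodings above: a 32-bit value is stored
 * as log2(32) + 1 = 6 and decoded back as 1 << (6 - 1) = 32, while
 * num_components 1-4 are stored as-is, 8 becomes 5 and 16 becomes 6, so any
 * NIR bit size or vector length fits in 3 bits of the packed headers.
 */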
static void
write_constant(write_ctx *ctx, const nir_constant *c)
{
   blob_write_bytes(ctx->blob, c->values, sizeof(c->values));
   blob_write_uint32(ctx->blob, c->num_elements);
   for (unsigned i = 0; i < c->num_elements; i++)
      write_constant(ctx, c->elements[i]);
}

static nir_constant *
read_constant(read_ctx *ctx, nir_variable *nvar)
{
   nir_constant *c = ralloc(nvar, nir_constant);

   blob_copy_bytes(ctx->blob, (uint8_t *)c->values, sizeof(c->values));
   c->num_elements = blob_read_uint32(ctx->blob);
   c->elements = ralloc_array(nvar, nir_constant *, c->num_elements);
   for (unsigned i = 0; i < c->num_elements; i++)
      c->elements[i] = read_constant(ctx, nvar);

   return c;
}
enum var_data_encoding {
   var_encode_full,
   var_encode_shader_temp,
   var_encode_function_temp,
   var_encode_location_diff,
};

union packed_var {
   uint32_t u32;
   struct {
      unsigned has_name:1;
      unsigned has_constant_initializer:1;
      unsigned has_interface_type:1;
      unsigned num_state_slots:7;
      unsigned data_encoding:2;
      unsigned type_same_as_last:1;
      unsigned interface_type_same_as_last:1;
      unsigned _pad:2;
      unsigned num_members:16;
   } u;
};

union packed_var_data_diff {
   uint32_t u32;
   struct {
      int location:13;
      int location_frac:3;
      int driver_location:16;
   } u;
};
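
/* Example of the diff encoding: when only the locations changed since the
 * previously written variable (say location +1, location_frac unchanged,
 * driver_location +4), the whole nir_variable_data collapses to this single
 * packed_var_data_diff dword instead of a full copy of the struct.
 */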
static void
write_variable(write_ctx *ctx, const nir_variable *var)
{
   write_add_object(ctx, var);

   assert(var->num_state_slots < (1 << 7));
   assert(var->num_members < (1 << 16));

   STATIC_ASSERT(sizeof(union packed_var) == 4);
   union packed_var flags;
   flags.u32 = 0;

   flags.u.has_name = !ctx->strip && var->name;
   flags.u.has_constant_initializer = !!(var->constant_initializer);
   flags.u.has_interface_type = !!(var->interface_type);
   flags.u.type_same_as_last = var->type == ctx->last_type;
   flags.u.interface_type_same_as_last =
      var->interface_type && var->interface_type == ctx->last_interface_type;
   flags.u.num_state_slots = var->num_state_slots;
   flags.u.num_members = var->num_members;

   struct nir_variable_data data = var->data;

   /* When stripping, we expect that the location is no longer needed,
    * which is typically after shaders are linked.
    */
   if (ctx->strip &&
       data.mode != nir_var_shader_in &&
       data.mode != nir_var_shader_out)
      data.location = 0;

   /* Temporary variables don't serialize var->data. */
   if (data.mode == nir_var_shader_temp)
      flags.u.data_encoding = var_encode_shader_temp;
   else if (data.mode == nir_var_function_temp)
      flags.u.data_encoding = var_encode_function_temp;
   else {
      struct nir_variable_data tmp = data;

      tmp.location = ctx->last_var_data.location;
      tmp.location_frac = ctx->last_var_data.location_frac;
      tmp.driver_location = ctx->last_var_data.driver_location;

      /* See if we can encode only the difference in locations from the last
       * variable.
       */
      if (memcmp(&ctx->last_var_data, &tmp, sizeof(tmp)) == 0 &&
          abs((int)data.location -
              (int)ctx->last_var_data.location) < (1 << 12) &&
          abs((int)data.driver_location -
              (int)ctx->last_var_data.driver_location) < (1 << 15))
         flags.u.data_encoding = var_encode_location_diff;
      else
         flags.u.data_encoding = var_encode_full;
   }

   blob_write_uint32(ctx->blob, flags.u32);

   if (!flags.u.type_same_as_last) {
      encode_type_to_blob(ctx->blob, var->type);
      ctx->last_type = var->type;
   }

   if (var->interface_type && !flags.u.interface_type_same_as_last) {
      encode_type_to_blob(ctx->blob, var->interface_type);
      ctx->last_interface_type = var->interface_type;
   }

   if (flags.u.has_name)
      blob_write_string(ctx->blob, var->name);

   if (flags.u.data_encoding == var_encode_full ||
       flags.u.data_encoding == var_encode_location_diff) {
      if (flags.u.data_encoding == var_encode_full) {
         blob_write_bytes(ctx->blob, &data, sizeof(data));
      } else {
         /* Serialize only the difference in locations from the last variable.
          */
         union packed_var_data_diff diff;

         diff.u.location = data.location - ctx->last_var_data.location;
         diff.u.location_frac = data.location_frac -
                                ctx->last_var_data.location_frac;
         diff.u.driver_location = data.driver_location -
                                  ctx->last_var_data.driver_location;

         blob_write_uint32(ctx->blob, diff.u32);
      }

      ctx->last_var_data = data;
   }

   for (unsigned i = 0; i < var->num_state_slots; i++) {
      blob_write_bytes(ctx->blob, &var->state_slots[i],
                       sizeof(var->state_slots[i]));
   }
   if (var->constant_initializer)
      write_constant(ctx, var->constant_initializer);
   if (var->num_members > 0) {
      blob_write_bytes(ctx->blob, (uint8_t *) var->members,
                       var->num_members * sizeof(*var->members));
   }
}
static nir_variable *
read_variable(read_ctx *ctx)
{
   nir_variable *var = rzalloc(ctx->nir, nir_variable);
   read_add_object(ctx, var);

   union packed_var flags;
   flags.u32 = blob_read_uint32(ctx->blob);

   if (flags.u.type_same_as_last) {
      var->type = ctx->last_type;
   } else {
      var->type = decode_type_from_blob(ctx->blob);
      ctx->last_type = var->type;
   }

   if (flags.u.has_interface_type) {
      if (flags.u.interface_type_same_as_last) {
         var->interface_type = ctx->last_interface_type;
      } else {
         var->interface_type = decode_type_from_blob(ctx->blob);
         ctx->last_interface_type = var->interface_type;
      }
   }

   if (flags.u.has_name) {
      const char *name = blob_read_string(ctx->blob);
      var->name = ralloc_strdup(var, name);
   } else {
      var->name = NULL;
   }

   if (flags.u.data_encoding == var_encode_shader_temp)
      var->data.mode = nir_var_shader_temp;
   else if (flags.u.data_encoding == var_encode_function_temp)
      var->data.mode = nir_var_function_temp;
   else if (flags.u.data_encoding == var_encode_full) {
      blob_copy_bytes(ctx->blob, (uint8_t *) &var->data, sizeof(var->data));
      ctx->last_var_data = var->data;
   } else { /* var_encode_location_diff */
      union packed_var_data_diff diff;
      diff.u32 = blob_read_uint32(ctx->blob);

      var->data = ctx->last_var_data;
      var->data.location += diff.u.location;
      var->data.location_frac += diff.u.location_frac;
      var->data.driver_location += diff.u.driver_location;

      ctx->last_var_data = var->data;
   }

   var->num_state_slots = flags.u.num_state_slots;
   if (var->num_state_slots != 0) {
      var->state_slots = ralloc_array(var, nir_state_slot,
                                      var->num_state_slots);
      for (unsigned i = 0; i < var->num_state_slots; i++) {
         blob_copy_bytes(ctx->blob, &var->state_slots[i],
                         sizeof(var->state_slots[i]));
      }
   }
   if (flags.u.has_constant_initializer)
      var->constant_initializer = read_constant(ctx, var);
   else
      var->constant_initializer = NULL;
   var->num_members = flags.u.num_members;
   if (var->num_members > 0) {
      var->members = ralloc_array(var, struct nir_variable_data,
                                  var->num_members);
      blob_copy_bytes(ctx->blob, (uint8_t *) var->members,
                      var->num_members * sizeof(*var->members));
   }

   return var;
}
static void
write_var_list(write_ctx *ctx, const struct exec_list *src)
{
   blob_write_uint32(ctx->blob, exec_list_length(src));
   foreach_list_typed(nir_variable, var, node, src) {
      write_variable(ctx, var);
   }
}

static void
read_var_list(read_ctx *ctx, struct exec_list *dst)
{
   exec_list_make_empty(dst);
   unsigned num_vars = blob_read_uint32(ctx->blob);
   for (unsigned i = 0; i < num_vars; i++) {
      nir_variable *var = read_variable(ctx);
      exec_list_push_tail(dst, &var->node);
   }
}
static void
write_register(write_ctx *ctx, const nir_register *reg)
{
   write_add_object(ctx, reg);
   blob_write_uint32(ctx->blob, reg->num_components);
   blob_write_uint32(ctx->blob, reg->bit_size);
   blob_write_uint32(ctx->blob, reg->num_array_elems);
   blob_write_uint32(ctx->blob, reg->index);
   blob_write_uint32(ctx->blob, !ctx->strip && reg->name);
   if (!ctx->strip && reg->name)
      blob_write_string(ctx->blob, reg->name);
}

static nir_register *
read_register(read_ctx *ctx)
{
   nir_register *reg = ralloc(ctx->nir, nir_register);
   read_add_object(ctx, reg);
   reg->num_components = blob_read_uint32(ctx->blob);
   reg->bit_size = blob_read_uint32(ctx->blob);
   reg->num_array_elems = blob_read_uint32(ctx->blob);
   reg->index = blob_read_uint32(ctx->blob);
   bool has_name = blob_read_uint32(ctx->blob);
   if (has_name) {
      const char *name = blob_read_string(ctx->blob);
      reg->name = ralloc_strdup(reg, name);
   } else {
      reg->name = NULL;
   }

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   return reg;
}
static void
write_reg_list(write_ctx *ctx, const struct exec_list *src)
{
   blob_write_uint32(ctx->blob, exec_list_length(src));
   foreach_list_typed(nir_register, reg, node, src)
      write_register(ctx, reg);
}

static void
read_reg_list(read_ctx *ctx, struct exec_list *dst)
{
   exec_list_make_empty(dst);
   unsigned num_regs = blob_read_uint32(ctx->blob);
   for (unsigned i = 0; i < num_regs; i++) {
      nir_register *reg = read_register(ctx);
      exec_list_push_tail(dst, &reg->node);
   }
}
union packed_src {
   uint32_t u32;
   struct {
      unsigned is_ssa:1;   /* <-- Header */
      unsigned is_indirect:1;
      unsigned object_idx:20;
      unsigned _footer:10; /* <-- Footer */
   } any;
   struct {
      unsigned _header:22; /* <-- Header */
      unsigned negate:1;   /* <-- Footer */
      unsigned abs:1;
      unsigned swizzle_x:2;
      unsigned swizzle_y:2;
      unsigned swizzle_z:2;
      unsigned swizzle_w:2;
   } alu;
   struct {
      unsigned _header:22; /* <-- Header */
      unsigned src_type:5; /* <-- Footer */
      unsigned _pad:5;
   } tex;
};
static void
write_src_full(write_ctx *ctx, const nir_src *src, union packed_src header)
{
   /* Since sources are very frequent, we try to save some space when storing
    * them. In particular, we store whether the source is a register and
    * whether the register has an indirect index in the low two bits. We can
    * assume that the high two bits of the index are zero, since otherwise our
    * address space would've been exhausted allocating the remap table!
    */
   header.any.is_ssa = src->is_ssa;
   if (src->is_ssa) {
      header.any.object_idx = write_lookup_object(ctx, src->ssa);
      blob_write_uint32(ctx->blob, header.u32);
   } else {
      header.any.object_idx = write_lookup_object(ctx, src->reg.reg);
      header.any.is_indirect = !!src->reg.indirect;
      blob_write_uint32(ctx->blob, header.u32);
      blob_write_uint32(ctx->blob, src->reg.base_offset);
      if (src->reg.indirect) {
         union packed_src header = {0};
         write_src_full(ctx, src->reg.indirect, header);
      }
   }
}

static void
write_src(write_ctx *ctx, const nir_src *src)
{
   union packed_src header = {0};
   write_src_full(ctx, src, header);
}
static union packed_src
read_src(read_ctx *ctx, nir_src *src, void *mem_ctx)
{
   STATIC_ASSERT(sizeof(union packed_src) == 4);
   union packed_src header;
   header.u32 = blob_read_uint32(ctx->blob);

   src->is_ssa = header.any.is_ssa;
   if (src->is_ssa) {
      src->ssa = read_lookup_object(ctx, header.any.object_idx);
   } else {
      src->reg.reg = read_lookup_object(ctx, header.any.object_idx);
      src->reg.base_offset = blob_read_uint32(ctx->blob);
      if (header.any.is_indirect) {
         src->reg.indirect = ralloc(mem_ctx, nir_src);
         read_src(ctx, src->reg.indirect, mem_ctx);
      } else {
         src->reg.indirect = NULL;
      }
   }

   return header;
}
union packed_dest {
   uint8_t u8;
   struct {
      uint8_t is_ssa:1;
      uint8_t has_name:1;
      uint8_t num_components:3;
      uint8_t bit_size:3;
   } ssa;
   struct {
      uint8_t is_ssa:1;
      uint8_t is_indirect:1;
      uint8_t _pad:6;
   } reg;
};
enum intrinsic_const_indices_encoding {
   /* Use the 6 bits of packed_const_indices to store 1-6 indices.
    * 1 6-bit index, or 2 3-bit indices, or 3 2-bit indices, or
    * 4-6 1-bit indices.
    *
    * The common case for load_ubo is 0, 0, 0, which is trivially represented.
    * The common cases for load_interpolated_input also fit here, e.g.: 7, 3
    */
   const_indices_6bit_all_combined,

   const_indices_8bit,  /* 8 bits per element */
   const_indices_16bit, /* 16 bits per element */
   const_indices_32bit, /* 32 bits per element */
};
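
/* Worked example for the combined encoding: load_interpolated_input with
 * const indices {7, 3} needs at most 3 bits per index, so each of the two
 * indices gets 6 / 2 = 3 bits and packed_const_indices ends up as
 * 7 | (3 << 3) = 0x1f, with no extra dwords emitted after the header.
 */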
enum load_const_packing {
   /* Constants are not packed and are stored in following dwords. */
   load_const_full,

   /* packed_value contains high 19 bits, low bits are 0,
    * good for floating-point decimals
    */
   load_const_scalar_hi_19bits,

   /* packed_value contains low 19 bits, high bits are sign-extended */
   load_const_scalar_lo_19bits_sext,
};
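
/* Example: the float 1.0f is 0x3f800000, whose low 13 bits are all zero, so
 * a 1-component 32-bit load_const of 1.0 uses load_const_scalar_hi_19bits
 * with packed_value = 0x3f800000 >> 13 and no constant dwords follow the
 * instruction header.
 */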
union packed_instr {
   uint32_t u32;
   struct {
      unsigned instr_type:4; /* always present */
      unsigned _pad:20;
      unsigned dest:8;       /* always last */
   } any;
   struct {
      unsigned instr_type:4;
      unsigned exact:1;
      unsigned no_signed_wrap:1;
      unsigned no_unsigned_wrap:1;
      unsigned saturate:1;
      unsigned writemask:4;
      unsigned op:9;
      unsigned packed_src_ssa_16bit:1;
      unsigned _pad:2;
      unsigned dest:8;
   } alu;
   struct {
      unsigned instr_type:4;
      unsigned deref_type:3;
      unsigned cast_type_same_as_last:1;
      unsigned mode:10;
      unsigned _pad:6;
      unsigned dest:8;
   } deref;
   struct {
      unsigned instr_type:4;
      unsigned intrinsic:9;
      unsigned num_components:3;
      unsigned const_indices_encoding:2;
      unsigned packed_const_indices:6;
      unsigned dest:8;
   } intrinsic;
   struct {
      unsigned instr_type:4;
      unsigned last_component:4;
      unsigned bit_size:3;
      unsigned packing:2;       /* enum load_const_packing */
      unsigned packed_value:19; /* meaning determined by packing */
   } load_const;
   struct {
      unsigned instr_type:4;
      unsigned last_component:4;
      unsigned bit_size:3;
      unsigned _pad:21;
   } undef;
   struct {
      unsigned instr_type:4;
      unsigned num_srcs:4;
      unsigned op:4;
      unsigned texture_array_size:12;
      unsigned dest:8;
   } tex;
   struct {
      unsigned instr_type:4;
      unsigned num_srcs:20;
      unsigned dest:8;
   } phi;
   struct {
      unsigned instr_type:4;
      unsigned type:2;
      unsigned _pad:26;
   } jump;
};
/* Write "lo24" as low 24 bits in the first uint32. */
static void
write_dest(write_ctx *ctx, const nir_dest *dst, union packed_instr header)
{
   STATIC_ASSERT(sizeof(union packed_dest) == 1);
   union packed_dest dest;
   dest.u8 = 0;

   dest.ssa.is_ssa = dst->is_ssa;
   if (dst->is_ssa) {
      dest.ssa.has_name = !ctx->strip && dst->ssa.name;
      dest.ssa.num_components =
         encode_num_components_in_3bits(dst->ssa.num_components);
      dest.ssa.bit_size = encode_bit_size_3bits(dst->ssa.bit_size);
   } else {
      dest.reg.is_indirect = !!(dst->reg.indirect);
   }

   header.any.dest = dest.u8;
   blob_write_uint32(ctx->blob, header.u32);

   if (dst->is_ssa) {
      write_add_object(ctx, &dst->ssa);
      if (dest.ssa.has_name)
         blob_write_string(ctx->blob, dst->ssa.name);
   } else {
      blob_write_uint32(ctx->blob, write_lookup_object(ctx, dst->reg.reg));
      blob_write_uint32(ctx->blob, dst->reg.base_offset);
      if (dst->reg.indirect)
         write_src(ctx, dst->reg.indirect);
   }
}
static void
read_dest(read_ctx *ctx, nir_dest *dst, nir_instr *instr,
          union packed_instr header)
{
   union packed_dest dest;
   dest.u8 = header.any.dest;

   if (dest.ssa.is_ssa) {
      unsigned bit_size = decode_bit_size_3bits(dest.ssa.bit_size);
      unsigned num_components =
         decode_num_components_in_3bits(dest.ssa.num_components);
      char *name = dest.ssa.has_name ? blob_read_string(ctx->blob) : NULL;
      nir_ssa_dest_init(instr, dst, num_components, bit_size, name);
      read_add_object(ctx, &dst->ssa);
   } else {
      dst->reg.reg = read_object(ctx);
      dst->reg.base_offset = blob_read_uint32(ctx->blob);
      if (dest.reg.is_indirect) {
         dst->reg.indirect = ralloc(instr, nir_src);
         read_src(ctx, dst->reg.indirect, instr);
      }
   }
}
static bool
are_object_ids_16bit(write_ctx *ctx)
{
   /* Check the highest object ID, because they are monotonic. */
   return ctx->next_idx < (1 << 16);
}

static bool
is_alu_src_ssa_16bit(write_ctx *ctx, const nir_alu_instr *alu)
{
   unsigned num_srcs = nir_op_infos[alu->op].num_inputs;

   for (unsigned i = 0; i < num_srcs; i++) {
      if (!alu->src[i].src.is_ssa || alu->src[i].abs || alu->src[i].negate)
         return false;

      unsigned src_components = nir_ssa_alu_instr_src_components(alu, i);

      for (unsigned chan = 0; chan < src_components; chan++) {
         if (alu->src[i].swizzle[chan] != chan)
            return false;
      }
   }

   return are_object_ids_16bit(ctx);
}
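
/* When every ALU source is a plain SSA value (no abs/negate, identity
 * swizzle) and fewer than 1 << 16 objects have been assigned IDs so far,
 * write_alu stores each source as a single uint16 object index instead of a
 * full packed_src dword, and read_alu reconstructs the identity swizzle.
 */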
static void
write_alu(write_ctx *ctx, const nir_alu_instr *alu)
{
   unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
   /* 9 bits for nir_op */
   STATIC_ASSERT(nir_num_opcodes <= 512);
   union packed_instr header;
   header.u32 = 0;

   header.alu.instr_type = alu->instr.type;
   header.alu.exact = alu->exact;
   header.alu.no_signed_wrap = alu->no_signed_wrap;
   header.alu.no_unsigned_wrap = alu->no_unsigned_wrap;
   header.alu.saturate = alu->dest.saturate;
   header.alu.writemask = alu->dest.write_mask;
   header.alu.op = alu->op;
   header.alu.packed_src_ssa_16bit = is_alu_src_ssa_16bit(ctx, alu);

   write_dest(ctx, &alu->dest.dest, header);

   if (header.alu.packed_src_ssa_16bit) {
      for (unsigned i = 0; i < num_srcs; i++) {
         assert(alu->src[i].src.is_ssa);
         unsigned idx = write_lookup_object(ctx, alu->src[i].src.ssa);
         assert(idx < (1 << 16));
         blob_write_uint16(ctx->blob, idx);
      }
   } else {
      for (unsigned i = 0; i < num_srcs; i++) {
         union packed_src src;
         src.u32 = 0;

         src.alu.negate = alu->src[i].negate;
         src.alu.abs = alu->src[i].abs;
         src.alu.swizzle_x = alu->src[i].swizzle[0];
         src.alu.swizzle_y = alu->src[i].swizzle[1];
         src.alu.swizzle_z = alu->src[i].swizzle[2];
         src.alu.swizzle_w = alu->src[i].swizzle[3];

         write_src_full(ctx, &alu->src[i].src, src);
      }
   }
}
static nir_alu_instr *
read_alu(read_ctx *ctx, union packed_instr header)
{
   unsigned num_srcs = nir_op_infos[header.alu.op].num_inputs;
   nir_alu_instr *alu = nir_alu_instr_create(ctx->nir, header.alu.op);

   alu->exact = header.alu.exact;
   alu->no_signed_wrap = header.alu.no_signed_wrap;
   alu->no_unsigned_wrap = header.alu.no_unsigned_wrap;
   alu->dest.saturate = header.alu.saturate;
   alu->dest.write_mask = header.alu.writemask;

   read_dest(ctx, &alu->dest.dest, &alu->instr, header);

   if (header.alu.packed_src_ssa_16bit) {
      for (unsigned i = 0; i < num_srcs; i++) {
         nir_alu_src *src = &alu->src[i];
         src->src.is_ssa = true;
         src->src.ssa = read_lookup_object(ctx, blob_read_uint16(ctx->blob));

         memset(&src->swizzle, 0, sizeof(src->swizzle));

         unsigned src_components = nir_ssa_alu_instr_src_components(alu, i);

         for (unsigned chan = 0; chan < src_components; chan++)
            src->swizzle[chan] = chan;
      }
   } else {
      for (unsigned i = 0; i < num_srcs; i++) {
         union packed_src src = read_src(ctx, &alu->src[i].src, &alu->instr);

         alu->src[i].negate = src.alu.negate;
         alu->src[i].abs = src.alu.abs;
         alu->src[i].swizzle[0] = src.alu.swizzle_x;
         alu->src[i].swizzle[1] = src.alu.swizzle_y;
         alu->src[i].swizzle[2] = src.alu.swizzle_z;
         alu->src[i].swizzle[3] = src.alu.swizzle_w;
      }
   }

   return alu;
}
static void
write_deref(write_ctx *ctx, const nir_deref_instr *deref)
{
   assert(deref->deref_type < 8);
   assert(deref->mode < (1 << 10));

   union packed_instr header;
   header.u32 = 0;

   header.deref.instr_type = deref->instr.type;
   header.deref.deref_type = deref->deref_type;

   if (deref->deref_type == nir_deref_type_cast) {
      header.deref.mode = deref->mode;
      header.deref.cast_type_same_as_last = deref->type == ctx->last_type;
   }

   write_dest(ctx, &deref->dest, header);

   if (deref->deref_type == nir_deref_type_var) {
      write_object(ctx, deref->var);
      return;
   }

   write_src(ctx, &deref->parent);

   switch (deref->deref_type) {
   case nir_deref_type_struct:
      blob_write_uint32(ctx->blob, deref->strct.index);
      break;

   case nir_deref_type_array:
   case nir_deref_type_ptr_as_array:
      write_src(ctx, &deref->arr.index);
      break;

   case nir_deref_type_cast:
      blob_write_uint32(ctx->blob, deref->cast.ptr_stride);
      if (!header.deref.cast_type_same_as_last) {
         encode_type_to_blob(ctx->blob, deref->type);
         ctx->last_type = deref->type;
      }
      break;

   case nir_deref_type_array_wildcard:
      /* Nothing to do */
      break;

   default:
      unreachable("Invalid deref type");
   }
}
static nir_deref_instr *
read_deref(read_ctx *ctx, union packed_instr header)
{
   nir_deref_type deref_type = header.deref.deref_type;
   nir_deref_instr *deref = nir_deref_instr_create(ctx->nir, deref_type);

   read_dest(ctx, &deref->dest, &deref->instr, header);

   if (deref_type == nir_deref_type_var) {
      deref->var = read_object(ctx);
      deref->type = deref->var->type;
      deref->mode = deref->var->data.mode;
      return deref;
   }

   read_src(ctx, &deref->parent, &deref->instr);
   nir_deref_instr *parent;

   switch (deref->deref_type) {
   case nir_deref_type_struct:
      parent = nir_src_as_deref(deref->parent);
      deref->strct.index = blob_read_uint32(ctx->blob);
      deref->type = glsl_get_struct_field(parent->type, deref->strct.index);
      break;

   case nir_deref_type_array:
   case nir_deref_type_ptr_as_array:
      parent = nir_src_as_deref(deref->parent);
      if (deref->deref_type == nir_deref_type_array)
         deref->type = glsl_get_array_element(parent->type);
      else
         deref->type = parent->type;
      read_src(ctx, &deref->arr.index, &deref->instr);
      break;

   case nir_deref_type_cast:
      deref->cast.ptr_stride = blob_read_uint32(ctx->blob);
      if (header.deref.cast_type_same_as_last) {
         deref->type = ctx->last_type;
      } else {
         deref->type = decode_type_from_blob(ctx->blob);
         ctx->last_type = deref->type;
      }
      break;

   case nir_deref_type_array_wildcard:
      parent = nir_src_as_deref(deref->parent);
      deref->type = glsl_get_array_element(parent->type);
      break;

   default:
      unreachable("Invalid deref type");
   }

   if (deref->deref_type == nir_deref_type_cast) {
      deref->mode = header.deref.mode;
   } else {
      assert(deref->parent.is_ssa);
      deref->mode = nir_instr_as_deref(deref->parent.ssa->parent_instr)->mode;
   }

   return deref;
}
static void
write_intrinsic(write_ctx *ctx, const nir_intrinsic_instr *intrin)
{
   /* 9 bits for nir_intrinsic_op */
   STATIC_ASSERT(nir_num_intrinsics <= 512);
   unsigned num_srcs = nir_intrinsic_infos[intrin->intrinsic].num_srcs;
   unsigned num_indices = nir_intrinsic_infos[intrin->intrinsic].num_indices;
   assert(intrin->intrinsic < 512);

   union packed_instr header;
   header.u32 = 0;

   header.intrinsic.instr_type = intrin->instr.type;
   header.intrinsic.intrinsic = intrin->intrinsic;
   header.intrinsic.num_components =
      encode_num_components_in_3bits(intrin->num_components);

   /* Analyze constant indices to decide how to encode them. */
   if (num_indices) {
      unsigned max_bits = 0;
      for (unsigned i = 0; i < num_indices; i++) {
         unsigned max = util_last_bit(intrin->const_index[i]);
         max_bits = MAX2(max_bits, max);
      }

      if (max_bits * num_indices <= 6) {
         header.intrinsic.const_indices_encoding = const_indices_6bit_all_combined;

         /* Pack all const indices into 6 bits. */
         unsigned bit_size = 6 / num_indices;
         for (unsigned i = 0; i < num_indices; i++) {
            header.intrinsic.packed_const_indices |=
               intrin->const_index[i] << (i * bit_size);
         }
      } else if (max_bits <= 8)
         header.intrinsic.const_indices_encoding = const_indices_8bit;
      else if (max_bits <= 16)
         header.intrinsic.const_indices_encoding = const_indices_16bit;
      else
         header.intrinsic.const_indices_encoding = const_indices_32bit;
   }

   if (nir_intrinsic_infos[intrin->intrinsic].has_dest)
      write_dest(ctx, &intrin->dest, header);
   else
      blob_write_uint32(ctx->blob, header.u32);

   for (unsigned i = 0; i < num_srcs; i++)
      write_src(ctx, &intrin->src[i]);

   if (num_indices) {
      switch (header.intrinsic.const_indices_encoding) {
      case const_indices_8bit:
         for (unsigned i = 0; i < num_indices; i++)
            blob_write_uint8(ctx->blob, intrin->const_index[i]);
         break;
      case const_indices_16bit:
         for (unsigned i = 0; i < num_indices; i++)
            blob_write_uint16(ctx->blob, intrin->const_index[i]);
         break;
      case const_indices_32bit:
         for (unsigned i = 0; i < num_indices; i++)
            blob_write_uint32(ctx->blob, intrin->const_index[i]);
         break;
      }
   }
}
static nir_intrinsic_instr *
read_intrinsic(read_ctx *ctx, union packed_instr header)
{
   nir_intrinsic_op op = header.intrinsic.intrinsic;
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(ctx->nir, op);

   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   unsigned num_indices = nir_intrinsic_infos[op].num_indices;

   intrin->num_components =
      decode_num_components_in_3bits(header.intrinsic.num_components);

   if (nir_intrinsic_infos[op].has_dest)
      read_dest(ctx, &intrin->dest, &intrin->instr, header);

   for (unsigned i = 0; i < num_srcs; i++)
      read_src(ctx, &intrin->src[i], &intrin->instr);

   if (num_indices) {
      switch (header.intrinsic.const_indices_encoding) {
      case const_indices_6bit_all_combined: {
         unsigned bit_size = 6 / num_indices;
         unsigned bit_mask = u_bit_consecutive(0, bit_size);
         for (unsigned i = 0; i < num_indices; i++) {
            intrin->const_index[i] =
               (header.intrinsic.packed_const_indices >> (i * bit_size)) &
               bit_mask;
         }
         break;
      }
      case const_indices_8bit:
         for (unsigned i = 0; i < num_indices; i++)
            intrin->const_index[i] = blob_read_uint8(ctx->blob);
         break;
      case const_indices_16bit:
         for (unsigned i = 0; i < num_indices; i++)
            intrin->const_index[i] = blob_read_uint16(ctx->blob);
         break;
      case const_indices_32bit:
         for (unsigned i = 0; i < num_indices; i++)
            intrin->const_index[i] = blob_read_uint32(ctx->blob);
         break;
      }
   }

   return intrin;
}
static void
write_load_const(write_ctx *ctx, const nir_load_const_instr *lc)
{
   assert(lc->def.num_components >= 1 && lc->def.num_components <= 16);
   union packed_instr header;
   header.u32 = 0;

   header.load_const.instr_type = lc->instr.type;
   header.load_const.last_component = lc->def.num_components - 1;
   header.load_const.bit_size = encode_bit_size_3bits(lc->def.bit_size);
   header.load_const.packing = load_const_full;

   /* Try to pack 1-component constants into the 19 free bits in the header. */
   if (lc->def.num_components == 1) {
      switch (lc->def.bit_size) {
      case 64:
         if ((lc->value[0].u64 & 0x1fffffffffffull) == 0) {
            /* packed_value contains high 19 bits, low bits are 0 */
            header.load_const.packing = load_const_scalar_hi_19bits;
            header.load_const.packed_value = lc->value[0].u64 >> 45;
         } else if (((lc->value[0].i64 << 45) >> 45) == lc->value[0].i64) {
            /* packed_value contains low 19 bits, high bits are sign-extended */
            header.load_const.packing = load_const_scalar_lo_19bits_sext;
            header.load_const.packed_value = lc->value[0].u64;
         }
         break;

      case 32:
         if ((lc->value[0].u32 & 0x1fff) == 0) {
            header.load_const.packing = load_const_scalar_hi_19bits;
            header.load_const.packed_value = lc->value[0].u32 >> 13;
         } else if (((lc->value[0].i32 << 13) >> 13) == lc->value[0].i32) {
            header.load_const.packing = load_const_scalar_lo_19bits_sext;
            header.load_const.packed_value = lc->value[0].u32;
         }
         break;

      case 16:
         header.load_const.packing = load_const_scalar_lo_19bits_sext;
         header.load_const.packed_value = lc->value[0].u16;
         break;
      case 8:
         header.load_const.packing = load_const_scalar_lo_19bits_sext;
         header.load_const.packed_value = lc->value[0].u8;
         break;
      case 1:
         header.load_const.packing = load_const_scalar_lo_19bits_sext;
         header.load_const.packed_value = lc->value[0].b;
         break;
      default:
         unreachable("invalid bit_size");
      }
   }

   blob_write_uint32(ctx->blob, header.u32);

   if (header.load_const.packing == load_const_full) {
      switch (lc->def.bit_size) {
      case 64:
         blob_write_bytes(ctx->blob, lc->value,
                          sizeof(*lc->value) * lc->def.num_components);
         break;

      case 32:
         for (unsigned i = 0; i < lc->def.num_components; i++)
            blob_write_uint32(ctx->blob, lc->value[i].u32);
         break;

      case 16:
         for (unsigned i = 0; i < lc->def.num_components; i++)
            blob_write_uint16(ctx->blob, lc->value[i].u16);
         break;

      default:
         assert(lc->def.bit_size <= 8);
         for (unsigned i = 0; i < lc->def.num_components; i++)
            blob_write_uint8(ctx->blob, lc->value[i].u8);
         break;
      }
   }

   write_add_object(ctx, &lc->def);
}
static nir_load_const_instr *
read_load_const(read_ctx *ctx, union packed_instr header)
{
   nir_load_const_instr *lc =
      nir_load_const_instr_create(ctx->nir, header.load_const.last_component + 1,
                                  decode_bit_size_3bits(header.load_const.bit_size));

   switch (header.load_const.packing) {
   case load_const_scalar_hi_19bits:
      switch (lc->def.bit_size) {
      case 64:
         lc->value[0].u64 = (uint64_t)header.load_const.packed_value << 45;
         break;
      case 32:
         lc->value[0].u32 = (uint64_t)header.load_const.packed_value << 13;
         break;
      default:
         unreachable("invalid bit_size");
      }
      break;

   case load_const_scalar_lo_19bits_sext:
      switch (lc->def.bit_size) {
      case 64:
         lc->value[0].i64 = ((int64_t)header.load_const.packed_value << 45) >> 45;
         break;
      case 32:
         lc->value[0].i32 = ((int32_t)header.load_const.packed_value << 13) >> 13;
         break;
      case 16:
         lc->value[0].u16 = header.load_const.packed_value;
         break;
      case 8:
         lc->value[0].u8 = header.load_const.packed_value;
         break;
      case 1:
         lc->value[0].b = header.load_const.packed_value;
         break;
      default:
         unreachable("invalid bit_size");
      }
      break;

   case load_const_full:
      switch (lc->def.bit_size) {
      case 64:
         blob_copy_bytes(ctx->blob, lc->value, sizeof(*lc->value) * lc->def.num_components);
         break;

      case 32:
         for (unsigned i = 0; i < lc->def.num_components; i++)
            lc->value[i].u32 = blob_read_uint32(ctx->blob);
         break;

      case 16:
         for (unsigned i = 0; i < lc->def.num_components; i++)
            lc->value[i].u16 = blob_read_uint16(ctx->blob);
         break;

      default:
         assert(lc->def.bit_size <= 8);
         for (unsigned i = 0; i < lc->def.num_components; i++)
            lc->value[i].u8 = blob_read_uint8(ctx->blob);
         break;
      }
      break;
   }

   read_add_object(ctx, &lc->def);

   return lc;
}
static void
write_ssa_undef(write_ctx *ctx, const nir_ssa_undef_instr *undef)
{
   assert(undef->def.num_components >= 1 && undef->def.num_components <= 16);

   union packed_instr header;
   header.u32 = 0;

   header.undef.instr_type = undef->instr.type;
   header.undef.last_component = undef->def.num_components - 1;
   header.undef.bit_size = encode_bit_size_3bits(undef->def.bit_size);

   blob_write_uint32(ctx->blob, header.u32);
   write_add_object(ctx, &undef->def);
}

static nir_ssa_undef_instr *
read_ssa_undef(read_ctx *ctx, union packed_instr header)
{
   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(ctx->nir, header.undef.last_component + 1,
                                 decode_bit_size_3bits(header.undef.bit_size));

   read_add_object(ctx, &undef->def);

   return undef;
}
union packed_tex_data {
   uint32_t u32;
   struct {
      enum glsl_sampler_dim sampler_dim:4;
      nir_alu_type dest_type:8;
      unsigned coord_components:3;
      unsigned is_array:1;
      unsigned is_shadow:1;
      unsigned is_new_style_shadow:1;
      unsigned component:2;
      unsigned unused:10; /* Mark unused for valgrind. */
   } u;
};

static void
write_tex(write_ctx *ctx, const nir_tex_instr *tex)
{
   assert(tex->num_srcs < 16);
   assert(tex->op < 16);
   assert(tex->texture_array_size < 1024);

   union packed_instr header;
   header.u32 = 0;

   header.tex.instr_type = tex->instr.type;
   header.tex.num_srcs = tex->num_srcs;
   header.tex.op = tex->op;
   header.tex.texture_array_size = tex->texture_array_size;

   write_dest(ctx, &tex->dest, header);

   blob_write_uint32(ctx->blob, tex->texture_index);
   blob_write_uint32(ctx->blob, tex->sampler_index);
   if (tex->op == nir_texop_tg4)
      blob_write_bytes(ctx->blob, tex->tg4_offsets, sizeof(tex->tg4_offsets));

   STATIC_ASSERT(sizeof(union packed_tex_data) == sizeof(uint32_t));
   union packed_tex_data packed = {
      .u.sampler_dim = tex->sampler_dim,
      .u.dest_type = tex->dest_type,
      .u.coord_components = tex->coord_components,
      .u.is_array = tex->is_array,
      .u.is_shadow = tex->is_shadow,
      .u.is_new_style_shadow = tex->is_new_style_shadow,
      .u.component = tex->component,
   };
   blob_write_uint32(ctx->blob, packed.u32);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      union packed_src src;
      src.u32 = 0;
      src.tex.src_type = tex->src[i].src_type;
      write_src_full(ctx, &tex->src[i].src, src);
   }
}
static nir_tex_instr *
read_tex(read_ctx *ctx, union packed_instr header)
{
   nir_tex_instr *tex = nir_tex_instr_create(ctx->nir, header.tex.num_srcs);

   read_dest(ctx, &tex->dest, &tex->instr, header);

   tex->op = header.tex.op;
   tex->texture_index = blob_read_uint32(ctx->blob);
   tex->texture_array_size = header.tex.texture_array_size;
   tex->sampler_index = blob_read_uint32(ctx->blob);
   if (tex->op == nir_texop_tg4)
      blob_copy_bytes(ctx->blob, tex->tg4_offsets, sizeof(tex->tg4_offsets));

   union packed_tex_data packed;
   packed.u32 = blob_read_uint32(ctx->blob);
   tex->sampler_dim = packed.u.sampler_dim;
   tex->dest_type = packed.u.dest_type;
   tex->coord_components = packed.u.coord_components;
   tex->is_array = packed.u.is_array;
   tex->is_shadow = packed.u.is_shadow;
   tex->is_new_style_shadow = packed.u.is_new_style_shadow;
   tex->component = packed.u.component;

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      union packed_src src = read_src(ctx, &tex->src[i].src, &tex->instr);
      tex->src[i].src_type = src.tex.src_type;
   }

   return tex;
}
static void
write_phi(write_ctx *ctx, const nir_phi_instr *phi)
{
   union packed_instr header;
   header.u32 = 0;

   header.phi.instr_type = phi->instr.type;
   header.phi.num_srcs = exec_list_length(&phi->srcs);

   /* Phi nodes are special, since they may reference SSA definitions and
    * basic blocks that don't exist yet. We leave two empty uint32_t's here,
    * and then store enough information so that a later fixup pass can fill
    * them in correctly.
    */
   write_dest(ctx, &phi->dest, header);

   nir_foreach_phi_src(src, phi) {
      assert(src->src.is_ssa);
      size_t blob_offset = blob_reserve_uint32(ctx->blob);
      ASSERTED size_t blob_offset2 = blob_reserve_uint32(ctx->blob);
      assert(blob_offset + sizeof(uint32_t) == blob_offset2);
      write_phi_fixup fixup = {
         .blob_offset = blob_offset,
         .src = src->src.ssa,
         .block = src->pred,
      };
      util_dynarray_append(&ctx->phi_fixups, write_phi_fixup, fixup);
   }
}
static void
write_fixup_phis(write_ctx *ctx)
{
   util_dynarray_foreach(&ctx->phi_fixups, write_phi_fixup, fixup) {
      uint32_t *blob_ptr = (uint32_t *)(ctx->blob->data + fixup->blob_offset);
      blob_ptr[0] = write_lookup_object(ctx, fixup->src);
      blob_ptr[1] = write_lookup_object(ctx, fixup->block);
   }

   util_dynarray_clear(&ctx->phi_fixups);
}
static nir_phi_instr *
read_phi(read_ctx *ctx, nir_block *blk, union packed_instr header)
{
   nir_phi_instr *phi = nir_phi_instr_create(ctx->nir);

   read_dest(ctx, &phi->dest, &phi->instr, header);

   /* For similar reasons as before, we just store the index directly into the
    * pointer, and let a later pass resolve the phi sources.
    *
    * In order to ensure that the copied sources (which are just the indices
    * from the blob for now) don't get inserted into the old shader's use-def
    * lists, we have to add the phi instruction *before* we set up its
    * sources.
    */
   nir_instr_insert_after_block(blk, &phi->instr);

   for (unsigned i = 0; i < header.phi.num_srcs; i++) {
      nir_phi_src *src = ralloc(phi, nir_phi_src);

      src->src.is_ssa = true;
      src->src.ssa = (nir_ssa_def *)(uintptr_t) blob_read_uint32(ctx->blob);
      src->pred = (nir_block *)(uintptr_t) blob_read_uint32(ctx->blob);

      /* Since we're not letting nir_insert_instr handle use/def stuff for us,
       * we have to set the parent_instr manually.  It doesn't really matter
       * when we do it, so we might as well do it here.
       */
      src->src.parent_instr = &phi->instr;

      /* Stash it in the list of phi sources.  We'll walk this list and fix up
       * sources at the very end of read_function_impl.
       */
      list_add(&src->src.use_link, &ctx->phi_srcs);

      exec_list_push_tail(&phi->srcs, &src->node);
   }

   return phi;
}
static void
read_fixup_phis(read_ctx *ctx)
{
   list_for_each_entry_safe(nir_phi_src, src, &ctx->phi_srcs, src.use_link) {
      src->pred = read_lookup_object(ctx, (uintptr_t)src->pred);
      src->src.ssa = read_lookup_object(ctx, (uintptr_t)src->src.ssa);

      /* Remove from this list */
      list_del(&src->src.use_link);

      list_addtail(&src->src.use_link, &src->src.ssa->uses);
   }
   assert(list_is_empty(&ctx->phi_srcs));
}
static void
write_jump(write_ctx *ctx, const nir_jump_instr *jmp)
{
   assert(jmp->type < 4);

   union packed_instr header;
   header.u32 = 0;

   header.jump.instr_type = jmp->instr.type;
   header.jump.type = jmp->type;

   blob_write_uint32(ctx->blob, header.u32);
}

static nir_jump_instr *
read_jump(read_ctx *ctx, union packed_instr header)
{
   nir_jump_instr *jmp = nir_jump_instr_create(ctx->nir, header.jump.type);
   return jmp;
}
static void
write_call(write_ctx *ctx, const nir_call_instr *call)
{
   blob_write_uint32(ctx->blob, write_lookup_object(ctx, call->callee));

   for (unsigned i = 0; i < call->num_params; i++)
      write_src(ctx, &call->params[i]);
}

static nir_call_instr *
read_call(read_ctx *ctx)
{
   nir_function *callee = read_object(ctx);
   nir_call_instr *call = nir_call_instr_create(ctx->nir, callee);

   for (unsigned i = 0; i < call->num_params; i++)
      read_src(ctx, &call->params[i], call);

   return call;
}
static void
write_instr(write_ctx *ctx, const nir_instr *instr)
{
   /* We have only 4 bits for the instruction type. */
   assert(instr->type < 16);

   switch (instr->type) {
   case nir_instr_type_alu:
      write_alu(ctx, nir_instr_as_alu(instr));
      break;
   case nir_instr_type_deref:
      write_deref(ctx, nir_instr_as_deref(instr));
      break;
   case nir_instr_type_intrinsic:
      write_intrinsic(ctx, nir_instr_as_intrinsic(instr));
      break;
   case nir_instr_type_load_const:
      write_load_const(ctx, nir_instr_as_load_const(instr));
      break;
   case nir_instr_type_ssa_undef:
      write_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
      break;
   case nir_instr_type_tex:
      write_tex(ctx, nir_instr_as_tex(instr));
      break;
   case nir_instr_type_phi:
      write_phi(ctx, nir_instr_as_phi(instr));
      break;
   case nir_instr_type_jump:
      write_jump(ctx, nir_instr_as_jump(instr));
      break;
   case nir_instr_type_call:
      blob_write_uint32(ctx->blob, instr->type);
      write_call(ctx, nir_instr_as_call(instr));
      break;
   case nir_instr_type_parallel_copy:
      unreachable("Cannot write parallel copies");
   default:
      unreachable("bad instr type");
   }
}
static void
read_instr(read_ctx *ctx, nir_block *block)
{
   STATIC_ASSERT(sizeof(union packed_instr) == 4);
   union packed_instr header;
   header.u32 = blob_read_uint32(ctx->blob);
   nir_instr *instr;

   switch (header.any.instr_type) {
   case nir_instr_type_alu:
      instr = &read_alu(ctx, header)->instr;
      break;
   case nir_instr_type_deref:
      instr = &read_deref(ctx, header)->instr;
      break;
   case nir_instr_type_intrinsic:
      instr = &read_intrinsic(ctx, header)->instr;
      break;
   case nir_instr_type_load_const:
      instr = &read_load_const(ctx, header)->instr;
      break;
   case nir_instr_type_ssa_undef:
      instr = &read_ssa_undef(ctx, header)->instr;
      break;
   case nir_instr_type_tex:
      instr = &read_tex(ctx, header)->instr;
      break;
   case nir_instr_type_phi:
      /* Phi instructions are a bit of a special case when reading because we
       * don't want inserting the instruction to automatically handle use/defs
       * for us.  Instead, we need to wait until all the blocks/instructions
       * are read so that we can set their sources up.
       */
      read_phi(ctx, block, header);
      return;
   case nir_instr_type_jump:
      instr = &read_jump(ctx, header)->instr;
      break;
   case nir_instr_type_call:
      instr = &read_call(ctx)->instr;
      break;
   case nir_instr_type_parallel_copy:
      unreachable("Cannot read parallel copies");
   default:
      unreachable("bad instr type");
   }

   nir_instr_insert_after_block(block, instr);
}
static void
write_block(write_ctx *ctx, const nir_block *block)
{
   write_add_object(ctx, block);
   blob_write_uint32(ctx->blob, exec_list_length(&block->instr_list));
   nir_foreach_instr(instr, block)
      write_instr(ctx, instr);
}

static void
read_block(read_ctx *ctx, struct exec_list *cf_list)
{
   /* Don't actually create a new block.  Just use the one from the tail of
    * the list.  NIR guarantees that the tail of the list is a block and that
    * no two blocks are side-by-side in the IR;  It should be empty.
    */
   nir_block *block =
      exec_node_data(nir_block, exec_list_get_tail(cf_list), cf_node.node);

   read_add_object(ctx, block);
   unsigned num_instrs = blob_read_uint32(ctx->blob);
   for (unsigned i = 0; i < num_instrs; i++) {
      read_instr(ctx, block);
   }
}
static void
write_cf_list(write_ctx *ctx, const struct exec_list *cf_list);

static void
read_cf_list(read_ctx *ctx, struct exec_list *cf_list);

static void
write_if(write_ctx *ctx, nir_if *nif)
{
   write_src(ctx, &nif->condition);

   write_cf_list(ctx, &nif->then_list);
   write_cf_list(ctx, &nif->else_list);
}

static void
read_if(read_ctx *ctx, struct exec_list *cf_list)
{
   nir_if *nif = nir_if_create(ctx->nir);

   read_src(ctx, &nif->condition, nif);

   nir_cf_node_insert_end(cf_list, &nif->cf_node);

   read_cf_list(ctx, &nif->then_list);
   read_cf_list(ctx, &nif->else_list);
}
static void
write_loop(write_ctx *ctx, nir_loop *loop)
{
   write_cf_list(ctx, &loop->body);
}

static void
read_loop(read_ctx *ctx, struct exec_list *cf_list)
{
   nir_loop *loop = nir_loop_create(ctx->nir);

   nir_cf_node_insert_end(cf_list, &loop->cf_node);

   read_cf_list(ctx, &loop->body);
}
static void
write_cf_node(write_ctx *ctx, nir_cf_node *cf)
{
   blob_write_uint32(ctx->blob, cf->type);

   switch (cf->type) {
   case nir_cf_node_block:
      write_block(ctx, nir_cf_node_as_block(cf));
      break;
   case nir_cf_node_if:
      write_if(ctx, nir_cf_node_as_if(cf));
      break;
   case nir_cf_node_loop:
      write_loop(ctx, nir_cf_node_as_loop(cf));
      break;
   default:
      unreachable("bad cf type");
   }
}

static void
read_cf_node(read_ctx *ctx, struct exec_list *list)
{
   nir_cf_node_type type = blob_read_uint32(ctx->blob);

   switch (type) {
   case nir_cf_node_block:
      read_block(ctx, list);
      break;
   case nir_cf_node_if:
      read_if(ctx, list);
      break;
   case nir_cf_node_loop:
      read_loop(ctx, list);
      break;
   default:
      unreachable("bad cf type");
   }
}
static void
write_cf_list(write_ctx *ctx, const struct exec_list *cf_list)
{
   blob_write_uint32(ctx->blob, exec_list_length(cf_list));
   foreach_list_typed(nir_cf_node, cf, node, cf_list) {
      write_cf_node(ctx, cf);
   }
}

static void
read_cf_list(read_ctx *ctx, struct exec_list *cf_list)
{
   uint32_t num_cf_nodes = blob_read_uint32(ctx->blob);
   for (unsigned i = 0; i < num_cf_nodes; i++)
      read_cf_node(ctx, cf_list);
}
static void
write_function_impl(write_ctx *ctx, const nir_function_impl *fi)
{
   write_var_list(ctx, &fi->locals);
   write_reg_list(ctx, &fi->registers);
   blob_write_uint32(ctx->blob, fi->reg_alloc);

   write_cf_list(ctx, &fi->body);
   write_fixup_phis(ctx);
}

static nir_function_impl *
read_function_impl(read_ctx *ctx, nir_function *fxn)
{
   nir_function_impl *fi = nir_function_impl_create_bare(ctx->nir);
   fi->function = fxn;

   read_var_list(ctx, &fi->locals);
   read_reg_list(ctx, &fi->registers);
   fi->reg_alloc = blob_read_uint32(ctx->blob);

   read_cf_list(ctx, &fi->body);
   read_fixup_phis(ctx);

   fi->valid_metadata = 0;

   return fi;
}
static void
write_function(write_ctx *ctx, const nir_function *fxn)
{
   uint32_t flags = fxn->is_entrypoint;
   if (fxn->name)
      flags |= 0x2;
   if (fxn->impl)
      flags |= 0x4;
   blob_write_uint32(ctx->blob, flags);
   if (fxn->name)
      blob_write_string(ctx->blob, fxn->name);

   write_add_object(ctx, fxn);

   blob_write_uint32(ctx->blob, fxn->num_params);
   for (unsigned i = 0; i < fxn->num_params; i++) {
      uint32_t val =
         ((uint32_t)fxn->params[i].num_components) |
         ((uint32_t)fxn->params[i].bit_size) << 8;
      blob_write_uint32(ctx->blob, val);
   }

   /* At first glance, it looks like we should write the function_impl here.
    * However, call instructions need to be able to reference at least the
    * function and those will get processed as we write the function_impls.
    * We stop here and write function_impls as a second pass.
    */
}
static void
read_function(read_ctx *ctx)
{
   uint32_t flags = blob_read_uint32(ctx->blob);
   bool has_name = flags & 0x2;
   char *name = has_name ? blob_read_string(ctx->blob) : NULL;

   nir_function *fxn = nir_function_create(ctx->nir, name);

   read_add_object(ctx, fxn);

   fxn->num_params = blob_read_uint32(ctx->blob);
   fxn->params = ralloc_array(fxn, nir_parameter, fxn->num_params);
   for (unsigned i = 0; i < fxn->num_params; i++) {
      uint32_t val = blob_read_uint32(ctx->blob);
      fxn->params[i].num_components = val & 0xff;
      fxn->params[i].bit_size = (val >> 8) & 0xff;
   }

   fxn->is_entrypoint = flags & 0x1;
   if (flags & 0x4)
      fxn->impl = NIR_SERIALIZE_FUNC_HAS_IMPL;
}
/**
 * Serialize NIR into a binary blob.
 *
 * \param strip  Don't serialize information only useful for debugging,
 *               such as variable names, making cache hits from similar
 *               shaders more likely.
 */
void
nir_serialize(struct blob *blob, const nir_shader *nir, bool strip)
{
   write_ctx ctx = {0};
   ctx.remap_table = _mesa_pointer_hash_table_create(NULL);
   ctx.blob = blob;
   ctx.nir = nir;
   ctx.strip = strip;
   util_dynarray_init(&ctx.phi_fixups, NULL);

   size_t idx_size_offset = blob_reserve_uint32(blob);

   struct shader_info info = nir->info;
   uint32_t strings = 0;
   if (!strip && info.name)
      strings |= 0x1;
   if (!strip && info.label)
      strings |= 0x2;
   blob_write_uint32(blob, strings);
   if (!strip && info.name)
      blob_write_string(blob, info.name);
   if (!strip && info.label)
      blob_write_string(blob, info.label);
   info.name = info.label = NULL;
   blob_write_bytes(blob, (uint8_t *) &info, sizeof(info));

   write_var_list(&ctx, &nir->uniforms);
   write_var_list(&ctx, &nir->inputs);
   write_var_list(&ctx, &nir->outputs);
   write_var_list(&ctx, &nir->shared);
   write_var_list(&ctx, &nir->globals);
   write_var_list(&ctx, &nir->system_values);

   blob_write_uint32(blob, nir->num_inputs);
   blob_write_uint32(blob, nir->num_uniforms);
   blob_write_uint32(blob, nir->num_outputs);
   blob_write_uint32(blob, nir->num_shared);
   blob_write_uint32(blob, nir->scratch_size);

   blob_write_uint32(blob, exec_list_length(&nir->functions));
   nir_foreach_function(fxn, nir) {
      write_function(&ctx, fxn);
   }

   nir_foreach_function(fxn, nir) {
      if (fxn->impl)
         write_function_impl(&ctx, fxn->impl);
   }

   blob_write_uint32(blob, nir->constant_data_size);
   if (nir->constant_data_size > 0)
      blob_write_bytes(blob, nir->constant_data, nir->constant_data_size);

   *(uint32_t *)(blob->data + idx_size_offset) = ctx.next_idx;

   _mesa_hash_table_destroy(ctx.remap_table, NULL);
   util_dynarray_fini(&ctx.phi_fixups);
}
nir_shader *
nir_deserialize(void *mem_ctx,
                const struct nir_shader_compiler_options *options,
                struct blob_reader *blob)
{
   read_ctx ctx = {0};
   ctx.blob = blob;
   list_inithead(&ctx.phi_srcs);
   ctx.idx_table_len = blob_read_uint32(blob);
   ctx.idx_table = calloc(ctx.idx_table_len, sizeof(uintptr_t));

   uint32_t strings = blob_read_uint32(blob);
   char *name = (strings & 0x1) ? blob_read_string(blob) : NULL;
   char *label = (strings & 0x2) ? blob_read_string(blob) : NULL;

   struct shader_info info;
   blob_copy_bytes(blob, (uint8_t *) &info, sizeof(info));

   ctx.nir = nir_shader_create(mem_ctx, info.stage, options, NULL);

   info.name = name ? ralloc_strdup(ctx.nir, name) : NULL;
   info.label = label ? ralloc_strdup(ctx.nir, label) : NULL;

   ctx.nir->info = info;

   read_var_list(&ctx, &ctx.nir->uniforms);
   read_var_list(&ctx, &ctx.nir->inputs);
   read_var_list(&ctx, &ctx.nir->outputs);
   read_var_list(&ctx, &ctx.nir->shared);
   read_var_list(&ctx, &ctx.nir->globals);
   read_var_list(&ctx, &ctx.nir->system_values);

   ctx.nir->num_inputs = blob_read_uint32(blob);
   ctx.nir->num_uniforms = blob_read_uint32(blob);
   ctx.nir->num_outputs = blob_read_uint32(blob);
   ctx.nir->num_shared = blob_read_uint32(blob);
   ctx.nir->scratch_size = blob_read_uint32(blob);

   unsigned num_functions = blob_read_uint32(blob);
   for (unsigned i = 0; i < num_functions; i++)
      read_function(&ctx);

   nir_foreach_function(fxn, ctx.nir) {
      if (fxn->impl == NIR_SERIALIZE_FUNC_HAS_IMPL)
         fxn->impl = read_function_impl(&ctx, fxn);
   }

   ctx.nir->constant_data_size = blob_read_uint32(blob);
   if (ctx.nir->constant_data_size > 0) {
      ctx.nir->constant_data =
         ralloc_size(ctx.nir, ctx.nir->constant_data_size);
      blob_copy_bytes(blob, ctx.nir->constant_data,
                      ctx.nir->constant_data_size);
   }

   free(ctx.idx_table);

   return ctx.nir;
}
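
/* Round-trip a shader through the serializer in place: serialize it, free
 * the old in-memory IR, deserialize the blob, and replace the shader's
 * contents with the copy. Handy for exercising the serializer on every
 * shader a driver compiles.
 */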
void
nir_shader_serialize_deserialize(nir_shader *shader)
{
   const struct nir_shader_compiler_options *options = shader->options;

   struct blob writer;
   blob_init(&writer);
   nir_serialize(&writer, shader, false);

   /* Delete all of dest's ralloc children but leave dest alone */
   void *dead_ctx = ralloc_context(NULL);
   ralloc_adopt(dead_ctx, shader);
   ralloc_free(dead_ctx);

   dead_ctx = ralloc_context(NULL);

   struct blob_reader reader;
   blob_reader_init(&reader, writer.data, writer.size);
   nir_shader *copy = nir_deserialize(dead_ctx, options, &reader);

   blob_finish(&writer);

   nir_shader_replace(shader, copy);
   ralloc_free(dead_ctx);
}