/* CTF dict creation.
   Copyright (C) 2019-2021 Free Software Foundation, Inc.

   This file is part of libctf.

   libctf is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not see
   <http://www.gnu.org/licenses/>.  */
21 #include <sys/param.h>
26 #define EOVERFLOW ERANGE
30 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
33 /* Make sure the ptrtab has enough space for at least one more type.
35 We start with 4KiB of ptrtab, enough for a thousand types, then grow it 25%
39 ctf_grow_ptrtab (ctf_dict_t
*fp
)
41 size_t new_ptrtab_len
= fp
->ctf_ptrtab_len
;
43 /* We allocate one more ptrtab entry than we need, for the initial zero,
44 plus one because the caller will probably allocate a new type. */
46 if (fp
->ctf_ptrtab
== NULL
)
47 new_ptrtab_len
= 1024;
48 else if ((fp
->ctf_typemax
+ 2) > fp
->ctf_ptrtab_len
)
49 new_ptrtab_len
= fp
->ctf_ptrtab_len
* 1.25;
51 if (new_ptrtab_len
!= fp
->ctf_ptrtab_len
)
55 if ((new_ptrtab
= realloc (fp
->ctf_ptrtab
,
56 new_ptrtab_len
* sizeof (uint32_t))) == NULL
)
57 return (ctf_set_errno (fp
, ENOMEM
));
59 fp
->ctf_ptrtab
= new_ptrtab
;
60 memset (fp
->ctf_ptrtab
+ fp
->ctf_ptrtab_len
, 0,
61 (new_ptrtab_len
- fp
->ctf_ptrtab_len
) * sizeof (uint32_t));
62 fp
->ctf_ptrtab_len
= new_ptrtab_len
;
67 /* To create an empty CTF dict, we just declare a zeroed header and call
68 ctf_bufopen() on it. If ctf_bufopen succeeds, we mark the new dict r/w and
69 initialize the dynamic members. We start assigning type IDs at 1 because
70 type ID 0 is used as a sentinel and a not-found indicator. */
73 ctf_create (int *errp
)
75 static const ctf_header_t hdr
= { .cth_preamble
= { CTF_MAGIC
, CTF_VERSION
, 0 } };
77 ctf_dynhash_t
*dthash
;
78 ctf_dynhash_t
*dvhash
;
79 ctf_dynhash_t
*structs
= NULL
, *unions
= NULL
, *enums
= NULL
, *names
= NULL
;
80 ctf_dynhash_t
*objthash
= NULL
, *funchash
= NULL
;
85 dthash
= ctf_dynhash_create (ctf_hash_integer
, ctf_hash_eq_integer
,
89 ctf_set_open_errno (errp
, EAGAIN
);
93 dvhash
= ctf_dynhash_create (ctf_hash_string
, ctf_hash_eq_string
,
97 ctf_set_open_errno (errp
, EAGAIN
);
101 structs
= ctf_dynhash_create (ctf_hash_string
, ctf_hash_eq_string
,
103 unions
= ctf_dynhash_create (ctf_hash_string
, ctf_hash_eq_string
,
105 enums
= ctf_dynhash_create (ctf_hash_string
, ctf_hash_eq_string
,
107 names
= ctf_dynhash_create (ctf_hash_string
, ctf_hash_eq_string
,
109 objthash
= ctf_dynhash_create (ctf_hash_string
, ctf_hash_eq_string
,
111 funchash
= ctf_dynhash_create (ctf_hash_string
, ctf_hash_eq_string
,
113 if (!structs
|| !unions
|| !enums
|| !names
)
115 ctf_set_open_errno (errp
, EAGAIN
);
119 cts
.cts_name
= _CTF_SECTION
;
121 cts
.cts_size
= sizeof (hdr
);
124 if ((fp
= ctf_bufopen_internal (&cts
, NULL
, NULL
, NULL
, 1, errp
)) == NULL
)
127 fp
->ctf_structs
.ctn_writable
= structs
;
128 fp
->ctf_unions
.ctn_writable
= unions
;
129 fp
->ctf_enums
.ctn_writable
= enums
;
130 fp
->ctf_names
.ctn_writable
= names
;
131 fp
->ctf_objthash
= objthash
;
132 fp
->ctf_funchash
= funchash
;
133 fp
->ctf_dthash
= dthash
;
134 fp
->ctf_dvhash
= dvhash
;
136 fp
->ctf_snapshots
= 1;
137 fp
->ctf_snapshot_lu
= 0;
138 fp
->ctf_flags
|= LCTF_DIRTY
;
140 ctf_set_ctl_hashes (fp
);
141 ctf_setmodel (fp
, CTF_MODEL_NATIVE
);
142 if (ctf_grow_ptrtab (fp
) < 0)
144 ctf_set_open_errno (errp
, ctf_errno (fp
));
152 ctf_dynhash_destroy (structs
);
153 ctf_dynhash_destroy (unions
);
154 ctf_dynhash_destroy (enums
);
155 ctf_dynhash_destroy (names
);
156 ctf_dynhash_destroy (objthash
);
157 ctf_dynhash_destroy (funchash
);
158 ctf_dynhash_destroy (dvhash
);
160 ctf_dynhash_destroy (dthash
);
165 /* Compatibility: just update the threshold for ctf_discard. */
167 ctf_update (ctf_dict_t
*fp
)
169 if (!(fp
->ctf_flags
& LCTF_RDWR
))
170 return (ctf_set_errno (fp
, ECTF_RDONLY
));
172 fp
->ctf_dtoldid
= fp
->ctf_typemax
;
177 ctf_name_table (ctf_dict_t
*fp
, int kind
)
182 return &fp
->ctf_structs
;
184 return &fp
->ctf_unions
;
186 return &fp
->ctf_enums
;
188 return &fp
->ctf_names
;
193 ctf_dtd_insert (ctf_dict_t
*fp
, ctf_dtdef_t
*dtd
, int flag
, int kind
)
196 if (ctf_dynhash_insert (fp
->ctf_dthash
, (void *) (uintptr_t) dtd
->dtd_type
,
199 ctf_set_errno (fp
, ENOMEM
);
203 if (flag
== CTF_ADD_ROOT
&& dtd
->dtd_data
.ctt_name
204 && (name
= ctf_strraw (fp
, dtd
->dtd_data
.ctt_name
)) != NULL
)
206 if (ctf_dynhash_insert (ctf_name_table (fp
, kind
)->ctn_writable
,
207 (char *) name
, (void *) (uintptr_t)
210 ctf_dynhash_remove (fp
->ctf_dthash
, (void *) (uintptr_t)
212 ctf_set_errno (fp
, ENOMEM
);
216 ctf_list_append (&fp
->ctf_dtdefs
, dtd
);
221 ctf_dtd_delete (ctf_dict_t
*fp
, ctf_dtdef_t
*dtd
)
223 ctf_dmdef_t
*dmd
, *nmd
;
224 int kind
= LCTF_INFO_KIND (fp
, dtd
->dtd_data
.ctt_info
);
225 int name_kind
= kind
;
228 ctf_dynhash_remove (fp
->ctf_dthash
, (void *) (uintptr_t) dtd
->dtd_type
);
229 free (dtd
->dtd_vlen
);
236 for (dmd
= ctf_list_next (&dtd
->dtd_u
.dtu_members
);
237 dmd
!= NULL
; dmd
= nmd
)
239 if (dmd
->dmd_name
!= NULL
)
240 free (dmd
->dmd_name
);
241 nmd
= ctf_list_next (dmd
);
246 free (dtd
->dtd_u
.dtu_argv
);
249 name_kind
= dtd
->dtd_data
.ctt_type
;
253 if (dtd
->dtd_data
.ctt_name
254 && (name
= ctf_strraw (fp
, dtd
->dtd_data
.ctt_name
)) != NULL
255 && LCTF_INFO_ISROOT (fp
, dtd
->dtd_data
.ctt_info
))
257 ctf_dynhash_remove (ctf_name_table (fp
, name_kind
)->ctn_writable
,
259 ctf_str_remove_ref (fp
, name
, &dtd
->dtd_data
.ctt_name
);
262 ctf_list_delete (&fp
->ctf_dtdefs
, dtd
);
267 ctf_dtd_lookup (const ctf_dict_t
*fp
, ctf_id_t type
)
269 return (ctf_dtdef_t
*)
270 ctf_dynhash_lookup (fp
->ctf_dthash
, (void *) (uintptr_t) type
);
274 ctf_dynamic_type (const ctf_dict_t
*fp
, ctf_id_t id
)
278 if (!(fp
->ctf_flags
& LCTF_RDWR
))
281 if ((fp
->ctf_flags
& LCTF_CHILD
) && LCTF_TYPE_ISPARENT (fp
, id
))
284 idx
= LCTF_TYPE_TO_INDEX(fp
, id
);
286 if ((unsigned long) idx
<= fp
->ctf_typemax
)
287 return ctf_dtd_lookup (fp
, id
);
292 ctf_dvd_insert (ctf_dict_t
*fp
, ctf_dvdef_t
*dvd
)
294 if (ctf_dynhash_insert (fp
->ctf_dvhash
, dvd
->dvd_name
, dvd
) < 0)
296 ctf_set_errno (fp
, ENOMEM
);
299 ctf_list_append (&fp
->ctf_dvdefs
, dvd
);
304 ctf_dvd_delete (ctf_dict_t
*fp
, ctf_dvdef_t
*dvd
)
306 ctf_dynhash_remove (fp
->ctf_dvhash
, dvd
->dvd_name
);
307 free (dvd
->dvd_name
);
309 ctf_list_delete (&fp
->ctf_dvdefs
, dvd
);
314 ctf_dvd_lookup (const ctf_dict_t
*fp
, const char *name
)
316 return (ctf_dvdef_t
*) ctf_dynhash_lookup (fp
->ctf_dvhash
, name
);
319 /* Discard all of the dynamic type definitions and variable definitions that
320 have been added to the dict since the last call to ctf_update(). We locate
321 such types by scanning the dtd list and deleting elements that have type IDs
322 greater than ctf_dtoldid, which is set by ctf_update(), above, and by
323 scanning the variable list and deleting elements that have update IDs equal
324 to the current value of the last-update snapshot count (indicating that they
325 were added after the most recent call to ctf_update()). */
327 ctf_discard (ctf_dict_t
*fp
)
329 ctf_snapshot_id_t last_update
=
331 fp
->ctf_snapshot_lu
+ 1 };
333 /* Update required? */
334 if (!(fp
->ctf_flags
& LCTF_DIRTY
))
337 return (ctf_rollback (fp
, last_update
));
341 ctf_snapshot (ctf_dict_t
*fp
)
343 ctf_snapshot_id_t snapid
;
344 snapid
.dtd_id
= fp
->ctf_typemax
;
345 snapid
.snapshot_id
= fp
->ctf_snapshots
++;
349 /* Like ctf_discard(), only discards everything after a particular ID. */
351 ctf_rollback (ctf_dict_t
*fp
, ctf_snapshot_id_t id
)
353 ctf_dtdef_t
*dtd
, *ntd
;
354 ctf_dvdef_t
*dvd
, *nvd
;
356 if (!(fp
->ctf_flags
& LCTF_RDWR
))
357 return (ctf_set_errno (fp
, ECTF_RDONLY
));
359 if (fp
->ctf_snapshot_lu
>= id
.snapshot_id
)
360 return (ctf_set_errno (fp
, ECTF_OVERROLLBACK
));
362 for (dtd
= ctf_list_next (&fp
->ctf_dtdefs
); dtd
!= NULL
; dtd
= ntd
)
367 ntd
= ctf_list_next (dtd
);
369 if (LCTF_TYPE_TO_INDEX (fp
, dtd
->dtd_type
) <= id
.dtd_id
)
372 kind
= LCTF_INFO_KIND (fp
, dtd
->dtd_data
.ctt_info
);
373 if (kind
== CTF_K_FORWARD
)
374 kind
= dtd
->dtd_data
.ctt_type
;
376 if (dtd
->dtd_data
.ctt_name
377 && (name
= ctf_strraw (fp
, dtd
->dtd_data
.ctt_name
)) != NULL
378 && LCTF_INFO_ISROOT (fp
, dtd
->dtd_data
.ctt_info
))
380 ctf_dynhash_remove (ctf_name_table (fp
, kind
)->ctn_writable
,
382 ctf_str_remove_ref (fp
, name
, &dtd
->dtd_data
.ctt_name
);
385 ctf_dynhash_remove (fp
->ctf_dthash
, (void *) (uintptr_t) dtd
->dtd_type
);
386 ctf_dtd_delete (fp
, dtd
);
389 for (dvd
= ctf_list_next (&fp
->ctf_dvdefs
); dvd
!= NULL
; dvd
= nvd
)
391 nvd
= ctf_list_next (dvd
);
393 if (dvd
->dvd_snapshots
<= id
.snapshot_id
)
396 ctf_dvd_delete (fp
, dvd
);
399 fp
->ctf_typemax
= id
.dtd_id
;
400 fp
->ctf_snapshots
= id
.snapshot_id
;
402 if (fp
->ctf_snapshots
== fp
->ctf_snapshot_lu
)
403 fp
->ctf_flags
&= ~LCTF_DIRTY
;
409 ctf_add_generic (ctf_dict_t
*fp
, uint32_t flag
, const char *name
, int kind
,
410 size_t vlen
, ctf_dtdef_t
**rp
)
415 if (flag
!= CTF_ADD_NONROOT
&& flag
!= CTF_ADD_ROOT
)
416 return (ctf_set_errno (fp
, EINVAL
));
418 if (!(fp
->ctf_flags
& LCTF_RDWR
))
419 return (ctf_set_errno (fp
, ECTF_RDONLY
));
421 if (LCTF_INDEX_TO_TYPE (fp
, fp
->ctf_typemax
, 1) >= CTF_MAX_TYPE
)
422 return (ctf_set_errno (fp
, ECTF_FULL
));
424 if (LCTF_INDEX_TO_TYPE (fp
, fp
->ctf_typemax
, 1) == (CTF_MAX_PTYPE
- 1))
425 return (ctf_set_errno (fp
, ECTF_FULL
));
427 /* Make sure ptrtab always grows to be big enough for all types. */
428 if (ctf_grow_ptrtab (fp
) < 0)
429 return CTF_ERR
; /* errno is set for us. */
431 if ((dtd
= calloc (1, sizeof (ctf_dtdef_t
))) == NULL
)
432 return (ctf_set_errno (fp
, EAGAIN
));
436 if ((dtd
->dtd_vlen
= calloc (1, vlen
)) == NULL
)
440 dtd
->dtd_vlen
= NULL
;
442 type
= ++fp
->ctf_typemax
;
443 type
= LCTF_INDEX_TO_TYPE (fp
, type
, (fp
->ctf_flags
& LCTF_CHILD
));
445 dtd
->dtd_data
.ctt_name
= ctf_str_add_ref (fp
, name
, &dtd
->dtd_data
.ctt_name
);
446 dtd
->dtd_type
= type
;
448 if (dtd
->dtd_data
.ctt_name
== 0 && name
!= NULL
&& name
[0] != '\0')
451 if (ctf_dtd_insert (fp
, dtd
, flag
, kind
) < 0)
452 goto err
; /* errno is set for us. */
454 fp
->ctf_flags
|= LCTF_DIRTY
;
460 ctf_set_errno (fp
, EAGAIN
);
462 free (dtd
->dtd_vlen
);
467 /* When encoding integer sizes, we want to convert a byte count in the range
468 1-8 to the closest power of 2 (e.g. 3->4, 5->8, etc). The clp2() function
469 is a clever implementation from "Hacker's Delight" by Henry Warren, Jr. */
485 ctf_add_encoded (ctf_dict_t
*fp
, uint32_t flag
,
486 const char *name
, const ctf_encoding_t
*ep
, uint32_t kind
)
493 return (ctf_set_errno (fp
, EINVAL
));
495 if (name
== NULL
|| name
[0] == '\0')
496 return (ctf_set_errno (fp
, ECTF_NONAME
));
498 if (!ctf_assert (fp
, kind
== CTF_K_INTEGER
|| kind
== CTF_K_FLOAT
))
499 return -1; /* errno is set for us. */
501 if ((type
= ctf_add_generic (fp
, flag
, name
, kind
, sizeof (uint32_t),
503 return CTF_ERR
; /* errno is set for us. */
505 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (kind
, flag
, 0);
506 dtd
->dtd_data
.ctt_size
= clp2 (P2ROUNDUP (ep
->cte_bits
, CHAR_BIT
)
511 encoding
= CTF_INT_DATA (ep
->cte_format
, ep
->cte_offset
, ep
->cte_bits
);
514 encoding
= CTF_FP_DATA (ep
->cte_format
, ep
->cte_offset
, ep
->cte_bits
);
517 memcpy (dtd
->dtd_vlen
, &encoding
, sizeof (encoding
));
523 ctf_add_reftype (ctf_dict_t
*fp
, uint32_t flag
, ctf_id_t ref
, uint32_t kind
)
527 ctf_dict_t
*tmp
= fp
;
528 int child
= fp
->ctf_flags
& LCTF_CHILD
;
530 if (ref
== CTF_ERR
|| ref
> CTF_MAX_TYPE
)
531 return (ctf_set_errno (fp
, EINVAL
));
533 if (ref
!= 0 && ctf_lookup_by_id (&tmp
, ref
) == NULL
)
534 return CTF_ERR
; /* errno is set for us. */
536 if ((type
= ctf_add_generic (fp
, flag
, NULL
, kind
, 0, &dtd
)) == CTF_ERR
)
537 return CTF_ERR
; /* errno is set for us. */
539 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (kind
, flag
, 0);
540 dtd
->dtd_data
.ctt_type
= (uint32_t) ref
;
542 if (kind
!= CTF_K_POINTER
)
545 /* If we are adding a pointer, update the ptrtab, pointing at this type from
546 the type it points to. Note that ctf_typemax is at this point one higher
547 than we want to check against, because it's just been incremented for the
548 addition of this type. The pptrtab is lazily-updated as needed, so is not
551 uint32_t type_idx
= LCTF_TYPE_TO_INDEX (fp
, type
);
552 uint32_t ref_idx
= LCTF_TYPE_TO_INDEX (fp
, ref
);
554 if (LCTF_TYPE_ISCHILD (fp
, ref
) == child
555 && ref_idx
< fp
->ctf_typemax
)
556 fp
->ctf_ptrtab
[ref_idx
] = type_idx
;
562 ctf_add_slice (ctf_dict_t
*fp
, uint32_t flag
, ctf_id_t ref
,
563 const ctf_encoding_t
*ep
)
567 ctf_id_t resolved_ref
= ref
;
570 const ctf_type_t
*tp
;
571 ctf_dict_t
*tmp
= fp
;
574 return (ctf_set_errno (fp
, EINVAL
));
576 if ((ep
->cte_bits
> 255) || (ep
->cte_offset
> 255))
577 return (ctf_set_errno (fp
, ECTF_SLICEOVERFLOW
));
579 if (ref
== CTF_ERR
|| ref
> CTF_MAX_TYPE
)
580 return (ctf_set_errno (fp
, EINVAL
));
582 if (ref
!= 0 && ((tp
= ctf_lookup_by_id (&tmp
, ref
)) == NULL
))
583 return CTF_ERR
; /* errno is set for us. */
585 /* Make sure we ultimately point to an integral type. We also allow slices to
586 point to the unimplemented type, for now, because the compiler can emit
587 such slices, though they're not very much use. */
589 resolved_ref
= ctf_type_resolve_unsliced (tmp
, ref
);
590 kind
= ctf_type_kind_unsliced (tmp
, resolved_ref
);
592 if ((kind
!= CTF_K_INTEGER
) && (kind
!= CTF_K_FLOAT
) &&
595 return (ctf_set_errno (fp
, ECTF_NOTINTFP
));
597 if ((type
= ctf_add_generic (fp
, flag
, NULL
, CTF_K_SLICE
,
598 sizeof (ctf_slice_t
), &dtd
)) == CTF_ERR
)
599 return CTF_ERR
; /* errno is set for us. */
601 memset (&slice
, 0, sizeof (ctf_slice_t
));
603 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (CTF_K_SLICE
, flag
, 0);
604 dtd
->dtd_data
.ctt_size
= clp2 (P2ROUNDUP (ep
->cte_bits
, CHAR_BIT
)
606 slice
.cts_type
= (uint32_t) ref
;
607 slice
.cts_bits
= ep
->cte_bits
;
608 slice
.cts_offset
= ep
->cte_offset
;
609 memcpy (dtd
->dtd_vlen
, &slice
, sizeof (ctf_slice_t
));
615 ctf_add_integer (ctf_dict_t
*fp
, uint32_t flag
,
616 const char *name
, const ctf_encoding_t
*ep
)
618 return (ctf_add_encoded (fp
, flag
, name
, ep
, CTF_K_INTEGER
));
622 ctf_add_float (ctf_dict_t
*fp
, uint32_t flag
,
623 const char *name
, const ctf_encoding_t
*ep
)
625 return (ctf_add_encoded (fp
, flag
, name
, ep
, CTF_K_FLOAT
));
629 ctf_add_pointer (ctf_dict_t
*fp
, uint32_t flag
, ctf_id_t ref
)
631 return (ctf_add_reftype (fp
, flag
, ref
, CTF_K_POINTER
));
635 ctf_add_array (ctf_dict_t
*fp
, uint32_t flag
, const ctf_arinfo_t
*arp
)
639 ctf_dict_t
*tmp
= fp
;
642 return (ctf_set_errno (fp
, EINVAL
));
644 if (arp
->ctr_contents
!= 0
645 && ctf_lookup_by_id (&tmp
, arp
->ctr_contents
) == NULL
)
646 return CTF_ERR
; /* errno is set for us. */
649 if (ctf_lookup_by_id (&tmp
, arp
->ctr_index
) == NULL
)
650 return CTF_ERR
; /* errno is set for us. */
652 if (ctf_type_kind (fp
, arp
->ctr_index
) == CTF_K_FORWARD
)
654 ctf_err_warn (fp
, 1, ECTF_INCOMPLETE
,
655 _("ctf_add_array: index type %lx is incomplete"),
657 return (ctf_set_errno (fp
, ECTF_INCOMPLETE
));
660 if ((type
= ctf_add_generic (fp
, flag
, NULL
, CTF_K_ARRAY
,
661 0, &dtd
)) == CTF_ERR
)
662 return CTF_ERR
; /* errno is set for us. */
664 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (CTF_K_ARRAY
, flag
, 0);
665 dtd
->dtd_data
.ctt_size
= 0;
666 dtd
->dtd_u
.dtu_arr
= *arp
;
672 ctf_set_array (ctf_dict_t
*fp
, ctf_id_t type
, const ctf_arinfo_t
*arp
)
674 ctf_dtdef_t
*dtd
= ctf_dtd_lookup (fp
, type
);
676 if (!(fp
->ctf_flags
& LCTF_RDWR
))
677 return (ctf_set_errno (fp
, ECTF_RDONLY
));
680 || LCTF_INFO_KIND (fp
, dtd
->dtd_data
.ctt_info
) != CTF_K_ARRAY
)
681 return (ctf_set_errno (fp
, ECTF_BADID
));
683 fp
->ctf_flags
|= LCTF_DIRTY
;
684 dtd
->dtd_u
.dtu_arr
= *arp
;
690 ctf_add_function (ctf_dict_t
*fp
, uint32_t flag
,
691 const ctf_funcinfo_t
*ctc
, const ctf_id_t
*argv
)
696 uint32_t *vdat
= NULL
;
697 ctf_dict_t
*tmp
= fp
;
700 if (!(fp
->ctf_flags
& LCTF_RDWR
))
701 return (ctf_set_errno (fp
, ECTF_RDONLY
));
703 if (ctc
== NULL
|| (ctc
->ctc_flags
& ~CTF_FUNC_VARARG
) != 0
704 || (ctc
->ctc_argc
!= 0 && argv
== NULL
))
705 return (ctf_set_errno (fp
, EINVAL
));
707 vlen
= ctc
->ctc_argc
;
708 if (ctc
->ctc_flags
& CTF_FUNC_VARARG
)
709 vlen
++; /* Add trailing zero to indicate varargs (see below). */
711 if (ctc
->ctc_return
!= 0
712 && ctf_lookup_by_id (&tmp
, ctc
->ctc_return
) == NULL
)
713 return CTF_ERR
; /* errno is set for us. */
715 if (vlen
> CTF_MAX_VLEN
)
716 return (ctf_set_errno (fp
, EOVERFLOW
));
718 if (vlen
!= 0 && (vdat
= malloc (sizeof (ctf_id_t
) * vlen
)) == NULL
)
719 return (ctf_set_errno (fp
, EAGAIN
));
721 for (i
= 0; i
< ctc
->ctc_argc
; i
++)
724 if (argv
[i
] != 0 && ctf_lookup_by_id (&tmp
, argv
[i
]) == NULL
)
727 return CTF_ERR
; /* errno is set for us. */
729 vdat
[i
] = (uint32_t) argv
[i
];
732 if ((type
= ctf_add_generic (fp
, flag
, NULL
, CTF_K_FUNCTION
,
733 0, &dtd
)) == CTF_ERR
)
736 return CTF_ERR
; /* errno is set for us. */
739 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (CTF_K_FUNCTION
, flag
, vlen
);
740 dtd
->dtd_data
.ctt_type
= (uint32_t) ctc
->ctc_return
;
742 if (ctc
->ctc_flags
& CTF_FUNC_VARARG
)
743 vdat
[vlen
- 1] = 0; /* Add trailing zero to indicate varargs. */
744 dtd
->dtd_u
.dtu_argv
= vdat
;
750 ctf_add_struct_sized (ctf_dict_t
*fp
, uint32_t flag
, const char *name
,
756 /* Promote root-visible forwards to structs. */
758 type
= ctf_lookup_by_rawname (fp
, CTF_K_STRUCT
, name
);
760 if (type
!= 0 && ctf_type_kind (fp
, type
) == CTF_K_FORWARD
)
761 dtd
= ctf_dtd_lookup (fp
, type
);
762 else if ((type
= ctf_add_generic (fp
, flag
, name
, CTF_K_STRUCT
,
763 0, &dtd
)) == CTF_ERR
)
764 return CTF_ERR
; /* errno is set for us. */
766 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (CTF_K_STRUCT
, flag
, 0);
768 if (size
> CTF_MAX_SIZE
)
770 dtd
->dtd_data
.ctt_size
= CTF_LSIZE_SENT
;
771 dtd
->dtd_data
.ctt_lsizehi
= CTF_SIZE_TO_LSIZE_HI (size
);
772 dtd
->dtd_data
.ctt_lsizelo
= CTF_SIZE_TO_LSIZE_LO (size
);
775 dtd
->dtd_data
.ctt_size
= (uint32_t) size
;
781 ctf_add_struct (ctf_dict_t
*fp
, uint32_t flag
, const char *name
)
783 return (ctf_add_struct_sized (fp
, flag
, name
, 0));
787 ctf_add_union_sized (ctf_dict_t
*fp
, uint32_t flag
, const char *name
,
793 /* Promote root-visible forwards to unions. */
795 type
= ctf_lookup_by_rawname (fp
, CTF_K_UNION
, name
);
797 if (type
!= 0 && ctf_type_kind (fp
, type
) == CTF_K_FORWARD
)
798 dtd
= ctf_dtd_lookup (fp
, type
);
799 else if ((type
= ctf_add_generic (fp
, flag
, name
, CTF_K_UNION
,
800 0, &dtd
)) == CTF_ERR
)
801 return CTF_ERR
; /* errno is set for us */
803 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (CTF_K_UNION
, flag
, 0);
805 if (size
> CTF_MAX_SIZE
)
807 dtd
->dtd_data
.ctt_size
= CTF_LSIZE_SENT
;
808 dtd
->dtd_data
.ctt_lsizehi
= CTF_SIZE_TO_LSIZE_HI (size
);
809 dtd
->dtd_data
.ctt_lsizelo
= CTF_SIZE_TO_LSIZE_LO (size
);
812 dtd
->dtd_data
.ctt_size
= (uint32_t) size
;
818 ctf_add_union (ctf_dict_t
*fp
, uint32_t flag
, const char *name
)
820 return (ctf_add_union_sized (fp
, flag
, name
, 0));
824 ctf_add_enum (ctf_dict_t
*fp
, uint32_t flag
, const char *name
)
829 /* Promote root-visible forwards to enums. */
831 type
= ctf_lookup_by_rawname (fp
, CTF_K_ENUM
, name
);
833 if (type
!= 0 && ctf_type_kind (fp
, type
) == CTF_K_FORWARD
)
834 dtd
= ctf_dtd_lookup (fp
, type
);
835 else if ((type
= ctf_add_generic (fp
, flag
, name
, CTF_K_ENUM
,
836 0, &dtd
)) == CTF_ERR
)
837 return CTF_ERR
; /* errno is set for us. */
839 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (CTF_K_ENUM
, flag
, 0);
840 dtd
->dtd_data
.ctt_size
= fp
->ctf_dmodel
->ctd_int
;
846 ctf_add_enum_encoded (ctf_dict_t
*fp
, uint32_t flag
, const char *name
,
847 const ctf_encoding_t
*ep
)
851 /* First, create the enum if need be, using most of the same machinery as
852 ctf_add_enum(), to ensure that we do not allow things past that are not
853 enums or forwards to them. (This includes other slices: you cannot slice a
854 slice, which would be a useless thing to do anyway.) */
857 type
= ctf_lookup_by_rawname (fp
, CTF_K_ENUM
, name
);
861 if ((ctf_type_kind (fp
, type
) != CTF_K_FORWARD
) &&
862 (ctf_type_kind_unsliced (fp
, type
) != CTF_K_ENUM
))
863 return (ctf_set_errno (fp
, ECTF_NOTINTFP
));
865 else if ((type
= ctf_add_enum (fp
, flag
, name
)) == CTF_ERR
)
866 return CTF_ERR
; /* errno is set for us. */
868 /* Now attach a suitable slice to it. */
870 return ctf_add_slice (fp
, flag
, type
, ep
);
874 ctf_add_forward (ctf_dict_t
*fp
, uint32_t flag
, const char *name
,
880 if (!ctf_forwardable_kind (kind
))
881 return (ctf_set_errno (fp
, ECTF_NOTSUE
));
883 if (name
== NULL
|| name
[0] == '\0')
884 return (ctf_set_errno (fp
, ECTF_NONAME
));
886 /* If the type is already defined or exists as a forward tag, just
887 return the ctf_id_t of the existing definition. */
889 type
= ctf_lookup_by_rawname (fp
, kind
, name
);
894 if ((type
= ctf_add_generic (fp
, flag
, name
, kind
, 0, &dtd
)) == CTF_ERR
)
895 return CTF_ERR
; /* errno is set for us. */
897 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (CTF_K_FORWARD
, flag
, 0);
898 dtd
->dtd_data
.ctt_type
= kind
;
904 ctf_add_typedef (ctf_dict_t
*fp
, uint32_t flag
, const char *name
,
909 ctf_dict_t
*tmp
= fp
;
911 if (ref
== CTF_ERR
|| ref
> CTF_MAX_TYPE
)
912 return (ctf_set_errno (fp
, EINVAL
));
914 if (name
== NULL
|| name
[0] == '\0')
915 return (ctf_set_errno (fp
, ECTF_NONAME
));
917 if (ref
!= 0 && ctf_lookup_by_id (&tmp
, ref
) == NULL
)
918 return CTF_ERR
; /* errno is set for us. */
920 if ((type
= ctf_add_generic (fp
, flag
, name
, CTF_K_TYPEDEF
, 0,
922 return CTF_ERR
; /* errno is set for us. */
924 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (CTF_K_TYPEDEF
, flag
, 0);
925 dtd
->dtd_data
.ctt_type
= (uint32_t) ref
;
931 ctf_add_volatile (ctf_dict_t
*fp
, uint32_t flag
, ctf_id_t ref
)
933 return (ctf_add_reftype (fp
, flag
, ref
, CTF_K_VOLATILE
));
937 ctf_add_const (ctf_dict_t
*fp
, uint32_t flag
, ctf_id_t ref
)
939 return (ctf_add_reftype (fp
, flag
, ref
, CTF_K_CONST
));
943 ctf_add_restrict (ctf_dict_t
*fp
, uint32_t flag
, ctf_id_t ref
)
945 return (ctf_add_reftype (fp
, flag
, ref
, CTF_K_RESTRICT
));
949 ctf_add_enumerator (ctf_dict_t
*fp
, ctf_id_t enid
, const char *name
,
952 ctf_dtdef_t
*dtd
= ctf_dtd_lookup (fp
, enid
);
955 uint32_t kind
, vlen
, root
;
959 return (ctf_set_errno (fp
, EINVAL
));
961 if (!(fp
->ctf_flags
& LCTF_RDWR
))
962 return (ctf_set_errno (fp
, ECTF_RDONLY
));
965 return (ctf_set_errno (fp
, ECTF_BADID
));
967 kind
= LCTF_INFO_KIND (fp
, dtd
->dtd_data
.ctt_info
);
968 root
= LCTF_INFO_ISROOT (fp
, dtd
->dtd_data
.ctt_info
);
969 vlen
= LCTF_INFO_VLEN (fp
, dtd
->dtd_data
.ctt_info
);
971 if (kind
!= CTF_K_ENUM
)
972 return (ctf_set_errno (fp
, ECTF_NOTENUM
));
974 if (vlen
== CTF_MAX_VLEN
)
975 return (ctf_set_errno (fp
, ECTF_DTFULL
));
977 for (dmd
= ctf_list_next (&dtd
->dtd_u
.dtu_members
);
978 dmd
!= NULL
; dmd
= ctf_list_next (dmd
))
980 if (strcmp (dmd
->dmd_name
, name
) == 0)
981 return (ctf_set_errno (fp
, ECTF_DUPLICATE
));
984 if ((dmd
= malloc (sizeof (ctf_dmdef_t
))) == NULL
)
985 return (ctf_set_errno (fp
, EAGAIN
));
987 if ((s
= strdup (name
)) == NULL
)
990 return (ctf_set_errno (fp
, EAGAIN
));
994 dmd
->dmd_type
= CTF_ERR
;
996 dmd
->dmd_value
= value
;
998 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (kind
, root
, vlen
+ 1);
999 ctf_list_append (&dtd
->dtd_u
.dtu_members
, dmd
);
1001 fp
->ctf_flags
|= LCTF_DIRTY
;
1007 ctf_add_member_offset (ctf_dict_t
*fp
, ctf_id_t souid
, const char *name
,
1008 ctf_id_t type
, unsigned long bit_offset
)
1010 ctf_dtdef_t
*dtd
= ctf_dtd_lookup (fp
, souid
);
1013 ssize_t msize
, malign
, ssize
;
1014 uint32_t kind
, vlen
, root
;
1016 int is_incomplete
= 0;
1018 if (!(fp
->ctf_flags
& LCTF_RDWR
))
1019 return (ctf_set_errno (fp
, ECTF_RDONLY
));
1022 return (ctf_set_errno (fp
, ECTF_BADID
));
1024 if (name
!= NULL
&& name
[0] == '\0')
1027 kind
= LCTF_INFO_KIND (fp
, dtd
->dtd_data
.ctt_info
);
1028 root
= LCTF_INFO_ISROOT (fp
, dtd
->dtd_data
.ctt_info
);
1029 vlen
= LCTF_INFO_VLEN (fp
, dtd
->dtd_data
.ctt_info
);
1031 if (kind
!= CTF_K_STRUCT
&& kind
!= CTF_K_UNION
)
1032 return (ctf_set_errno (fp
, ECTF_NOTSOU
));
1034 if (vlen
== CTF_MAX_VLEN
)
1035 return (ctf_set_errno (fp
, ECTF_DTFULL
));
1039 for (dmd
= ctf_list_next (&dtd
->dtd_u
.dtu_members
);
1040 dmd
!= NULL
; dmd
= ctf_list_next (dmd
))
1042 if (dmd
->dmd_name
!= NULL
&& strcmp (dmd
->dmd_name
, name
) == 0)
1043 return (ctf_set_errno (fp
, ECTF_DUPLICATE
));
1047 if ((msize
= ctf_type_size (fp
, type
)) < 0 ||
1048 (malign
= ctf_type_align (fp
, type
)) < 0)
1050 /* The unimplemented type, and any type that resolves to it, has no size
1051 and no alignment: it can correspond to any number of compiler-inserted
1052 types. We allow incomplete types through since they are routinely
1053 added to the ends of structures, and can even be added elsewhere in
1054 structures by the deduplicator. They are assumed to be zero-size with
1055 no alignment: this is often wrong, but problems can be avoided in this
1056 case by explicitly specifying the size of the structure via the _sized
1057 functions. The deduplicator always does this. */
1061 if (ctf_errno (fp
) == ECTF_NONREPRESENTABLE
)
1062 ctf_set_errno (fp
, 0);
1063 else if (ctf_errno (fp
) == ECTF_INCOMPLETE
)
1066 return -1; /* errno is set for us. */
1069 if ((dmd
= malloc (sizeof (ctf_dmdef_t
))) == NULL
)
1070 return (ctf_set_errno (fp
, EAGAIN
));
1072 if (name
!= NULL
&& (s
= strdup (name
)) == NULL
)
1075 return (ctf_set_errno (fp
, EAGAIN
));
1079 dmd
->dmd_type
= type
;
1080 dmd
->dmd_value
= -1;
1082 if (kind
== CTF_K_STRUCT
&& vlen
!= 0)
1084 if (bit_offset
== (unsigned long) - 1)
1086 /* Natural alignment. */
1088 ctf_dmdef_t
*lmd
= ctf_list_prev (&dtd
->dtd_u
.dtu_members
);
1089 ctf_id_t ltype
= ctf_type_resolve (fp
, lmd
->dmd_type
);
1090 size_t off
= lmd
->dmd_offset
;
1092 ctf_encoding_t linfo
;
1095 /* Propagate any error from ctf_type_resolve. If the last member was
1096 of unimplemented type, this may be -ECTF_NONREPRESENTABLE: we
1097 cannot insert right after such a member without explicit offset
1098 specification, because its alignment and size is not known. */
1099 if (ltype
== CTF_ERR
)
1102 return -1; /* errno is set for us. */
1107 ctf_err_warn (fp
, 1, ECTF_INCOMPLETE
,
1108 _("ctf_add_member_offset: cannot add member %s of "
1109 "incomplete type %lx to struct %lx without "
1110 "specifying explicit offset\n"),
1111 name
? name
: _("(unnamed member)"), type
, souid
);
1112 return (ctf_set_errno (fp
, ECTF_INCOMPLETE
));
1115 if (ctf_type_encoding (fp
, ltype
, &linfo
) == 0)
1116 off
+= linfo
.cte_bits
;
1117 else if ((lsize
= ctf_type_size (fp
, ltype
)) > 0)
1118 off
+= lsize
* CHAR_BIT
;
1119 else if (lsize
== -1 && ctf_errno (fp
) == ECTF_INCOMPLETE
)
1121 ctf_err_warn (fp
, 1, ECTF_INCOMPLETE
,
1122 _("ctf_add_member_offset: cannot add member %s of "
1123 "type %lx to struct %lx without specifying "
1124 "explicit offset after member %s of type %lx, "
1125 "which is an incomplete type\n"),
1126 name
? name
: _("(unnamed member)"), type
, souid
,
1127 lmd
->dmd_name
? lmd
->dmd_name
1128 : _("(unnamed member)"), ltype
);
1129 return -1; /* errno is set for us. */
1132 /* Round up the offset of the end of the last member to
1133 the next byte boundary, convert 'off' to bytes, and
1134 then round it up again to the next multiple of the
1135 alignment required by the new member. Finally,
1136 convert back to bits and store the result in
1137 dmd_offset. Technically we could do more efficient
1138 packing if the new member is a bit-field, but we're
1139 the "compiler" and ANSI says we can do as we choose. */
1141 off
= roundup (off
, CHAR_BIT
) / CHAR_BIT
;
1142 off
= roundup (off
, MAX (malign
, 1));
1143 dmd
->dmd_offset
= off
* CHAR_BIT
;
1144 ssize
= off
+ msize
;
1148 /* Specified offset in bits. */
1150 dmd
->dmd_offset
= bit_offset
;
1151 ssize
= ctf_get_ctt_size (fp
, &dtd
->dtd_data
, NULL
, NULL
);
1152 ssize
= MAX (ssize
, ((signed) bit_offset
/ CHAR_BIT
) + msize
);
1157 dmd
->dmd_offset
= 0;
1158 ssize
= ctf_get_ctt_size (fp
, &dtd
->dtd_data
, NULL
, NULL
);
1159 ssize
= MAX (ssize
, msize
);
1162 if ((size_t) ssize
> CTF_MAX_SIZE
)
1164 dtd
->dtd_data
.ctt_size
= CTF_LSIZE_SENT
;
1165 dtd
->dtd_data
.ctt_lsizehi
= CTF_SIZE_TO_LSIZE_HI (ssize
);
1166 dtd
->dtd_data
.ctt_lsizelo
= CTF_SIZE_TO_LSIZE_LO (ssize
);
1169 dtd
->dtd_data
.ctt_size
= (uint32_t) ssize
;
1171 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (kind
, root
, vlen
+ 1);
1172 ctf_list_append (&dtd
->dtd_u
.dtu_members
, dmd
);
1174 fp
->ctf_flags
|= LCTF_DIRTY
;
1179 ctf_add_member_encoded (ctf_dict_t
*fp
, ctf_id_t souid
, const char *name
,
1180 ctf_id_t type
, unsigned long bit_offset
,
1181 const ctf_encoding_t encoding
)
1183 ctf_dtdef_t
*dtd
= ctf_dtd_lookup (fp
, type
);
1184 int kind
= LCTF_INFO_KIND (fp
, dtd
->dtd_data
.ctt_info
);
1187 if ((kind
!= CTF_K_INTEGER
) && (kind
!= CTF_K_FLOAT
) && (kind
!= CTF_K_ENUM
))
1188 return (ctf_set_errno (fp
, ECTF_NOTINTFP
));
1190 if ((type
= ctf_add_slice (fp
, CTF_ADD_NONROOT
, otype
, &encoding
)) == CTF_ERR
)
1191 return -1; /* errno is set for us. */
1193 return ctf_add_member_offset (fp
, souid
, name
, type
, bit_offset
);
1197 ctf_add_member (ctf_dict_t
*fp
, ctf_id_t souid
, const char *name
,
1200 return ctf_add_member_offset (fp
, souid
, name
, type
, (unsigned long) - 1);
1204 ctf_add_variable (ctf_dict_t
*fp
, const char *name
, ctf_id_t ref
)
1207 ctf_dict_t
*tmp
= fp
;
1209 if (!(fp
->ctf_flags
& LCTF_RDWR
))
1210 return (ctf_set_errno (fp
, ECTF_RDONLY
));
1212 if (ctf_dvd_lookup (fp
, name
) != NULL
)
1213 return (ctf_set_errno (fp
, ECTF_DUPLICATE
));
1215 if (ctf_lookup_by_id (&tmp
, ref
) == NULL
)
1216 return -1; /* errno is set for us. */
1218 /* Make sure this type is representable. */
1219 if ((ctf_type_resolve (fp
, ref
) == CTF_ERR
)
1220 && (ctf_errno (fp
) == ECTF_NONREPRESENTABLE
))
1223 if ((dvd
= malloc (sizeof (ctf_dvdef_t
))) == NULL
)
1224 return (ctf_set_errno (fp
, EAGAIN
));
1226 if (name
!= NULL
&& (dvd
->dvd_name
= strdup (name
)) == NULL
)
1229 return (ctf_set_errno (fp
, EAGAIN
));
1231 dvd
->dvd_type
= ref
;
1232 dvd
->dvd_snapshots
= fp
->ctf_snapshots
;
1234 if (ctf_dvd_insert (fp
, dvd
) < 0)
1236 free (dvd
->dvd_name
);
1238 return -1; /* errno is set for us. */
1241 fp
->ctf_flags
|= LCTF_DIRTY
;
1246 ctf_add_funcobjt_sym (ctf_dict_t
*fp
, int is_function
, const char *name
, ctf_id_t id
)
1248 ctf_dict_t
*tmp
= fp
;
1250 ctf_dynhash_t
*h
= is_function
? fp
->ctf_funchash
: fp
->ctf_objthash
;
1252 if (!(fp
->ctf_flags
& LCTF_RDWR
))
1253 return (ctf_set_errno (fp
, ECTF_RDONLY
));
1255 if (ctf_dynhash_lookup (fp
->ctf_objthash
, name
) != NULL
||
1256 ctf_dynhash_lookup (fp
->ctf_funchash
, name
) != NULL
)
1257 return (ctf_set_errno (fp
, ECTF_DUPLICATE
));
1259 if (ctf_lookup_by_id (&tmp
, id
) == NULL
)
1260 return -1; /* errno is set for us. */
1262 if (is_function
&& ctf_type_kind (fp
, id
) != CTF_K_FUNCTION
)
1263 return (ctf_set_errno (fp
, ECTF_NOTFUNC
));
1265 if ((dupname
= strdup (name
)) == NULL
)
1266 return (ctf_set_errno (fp
, ENOMEM
));
1268 if (ctf_dynhash_insert (h
, dupname
, (void *) (uintptr_t) id
) < 0)
1271 return (ctf_set_errno (fp
, ENOMEM
));
1277 ctf_add_objt_sym (ctf_dict_t
*fp
, const char *name
, ctf_id_t id
)
1279 return (ctf_add_funcobjt_sym (fp
, 0, name
, id
));
1283 ctf_add_func_sym (ctf_dict_t
*fp
, const char *name
, ctf_id_t id
)
1285 return (ctf_add_funcobjt_sym (fp
, 1, name
, id
));
1288 typedef struct ctf_bundle
1290 ctf_dict_t
*ctb_dict
; /* CTF dict handle. */
1291 ctf_id_t ctb_type
; /* CTF type identifier. */
1292 ctf_dtdef_t
*ctb_dtd
; /* CTF dynamic type definition (if any). */
1296 enumcmp (const char *name
, int value
, void *arg
)
1298 ctf_bundle_t
*ctb
= arg
;
1301 if (ctf_enum_value (ctb
->ctb_dict
, ctb
->ctb_type
, name
, &bvalue
) < 0)
1303 ctf_err_warn (ctb
->ctb_dict
, 0, 0,
1304 _("conflict due to enum %s iteration error"), name
);
1307 if (value
!= bvalue
)
1309 ctf_err_warn (ctb
->ctb_dict
, 1, ECTF_CONFLICT
,
1310 _("conflict due to enum value change: %i versus %i"),
1318 enumadd (const char *name
, int value
, void *arg
)
1320 ctf_bundle_t
*ctb
= arg
;
1322 return (ctf_add_enumerator (ctb
->ctb_dict
, ctb
->ctb_type
,
1327 membcmp (const char *name
, ctf_id_t type _libctf_unused_
, unsigned long offset
,
1330 ctf_bundle_t
*ctb
= arg
;
1333 /* Don't check nameless members (e.g. anonymous structs/unions) against each
1338 if (ctf_member_info (ctb
->ctb_dict
, ctb
->ctb_type
, name
, &ctm
) < 0)
1340 ctf_err_warn (ctb
->ctb_dict
, 0, 0,
1341 _("conflict due to struct member %s iteration error"),
1345 if (ctm
.ctm_offset
!= offset
)
1347 ctf_err_warn (ctb
->ctb_dict
, 1, ECTF_CONFLICT
,
1348 _("conflict due to struct member %s offset change: "
1350 name
, ctm
.ctm_offset
, offset
);
1357 membadd (const char *name
, ctf_id_t type
, unsigned long offset
, void *arg
)
1359 ctf_bundle_t
*ctb
= arg
;
1363 if ((dmd
= malloc (sizeof (ctf_dmdef_t
))) == NULL
)
1364 return (ctf_set_errno (ctb
->ctb_dict
, EAGAIN
));
1366 /* Unnamed members in non-dynamic dicts have a name of "", while dynamic dicts
1372 if (name
!= NULL
&& (s
= strdup (name
)) == NULL
)
1375 return (ctf_set_errno (ctb
->ctb_dict
, EAGAIN
));
1378 /* For now, dmd_type is copied as the src_fp's type; it is reset to an
1379 equivalent dst_fp type by a final loop in ctf_add_type(), below. */
1381 dmd
->dmd_type
= type
;
1382 dmd
->dmd_offset
= offset
;
1383 dmd
->dmd_value
= -1;
1385 ctf_list_append (&ctb
->ctb_dtd
->dtd_u
.dtu_members
, dmd
);
1387 ctb
->ctb_dict
->ctf_flags
|= LCTF_DIRTY
;
1391 /* Record the correspondence between a source and ctf_add_type()-added
1392 destination type: both types are translated into parent type IDs if need be,
1393 so they relate to the actual dictionary they are in. Outside controlled
1394 circumstances (like linking) it is probably not useful to do more than
1395 compare these pointers, since there is nothing stopping the user closing the
1396 source dict whenever they want to.
1398 Our OOM handling here is just to not do anything, because this is called deep
1399 enough in the call stack that doing anything useful is painfully difficult:
1400 the worst consequence if we do OOM is a bit of type duplication anyway. */
1403 ctf_add_type_mapping (ctf_dict_t
*src_fp
, ctf_id_t src_type
,
1404 ctf_dict_t
*dst_fp
, ctf_id_t dst_type
)
1406 if (LCTF_TYPE_ISPARENT (src_fp
, src_type
) && src_fp
->ctf_parent
)
1407 src_fp
= src_fp
->ctf_parent
;
1409 src_type
= LCTF_TYPE_TO_INDEX(src_fp
, src_type
);
1411 if (LCTF_TYPE_ISPARENT (dst_fp
, dst_type
) && dst_fp
->ctf_parent
)
1412 dst_fp
= dst_fp
->ctf_parent
;
1414 dst_type
= LCTF_TYPE_TO_INDEX(dst_fp
, dst_type
);
1416 if (dst_fp
->ctf_link_type_mapping
== NULL
)
1418 ctf_hash_fun f
= ctf_hash_type_key
;
1419 ctf_hash_eq_fun e
= ctf_hash_eq_type_key
;
1421 if ((dst_fp
->ctf_link_type_mapping
= ctf_dynhash_create (f
, e
, free
,
1426 ctf_link_type_key_t
*key
;
1427 key
= calloc (1, sizeof (struct ctf_link_type_key
));
1431 key
->cltk_fp
= src_fp
;
1432 key
->cltk_idx
= src_type
;
1434 /* No OOM checking needed, because if this doesn't work the worst we'll do is
1435 add a few more duplicate types (which will probably run out of memory
1437 ctf_dynhash_insert (dst_fp
->ctf_link_type_mapping
, key
,
1438 (void *) (uintptr_t) dst_type
);
1441 /* Look up a type mapping: return 0 if none. The DST_FP is modified to point to
1442 the parent if need be. The ID returned is from the dst_fp's perspective. */
1444 ctf_type_mapping (ctf_dict_t
*src_fp
, ctf_id_t src_type
, ctf_dict_t
**dst_fp
)
1446 ctf_link_type_key_t key
;
1447 ctf_dict_t
*target_fp
= *dst_fp
;
1448 ctf_id_t dst_type
= 0;
1450 if (LCTF_TYPE_ISPARENT (src_fp
, src_type
) && src_fp
->ctf_parent
)
1451 src_fp
= src_fp
->ctf_parent
;
1453 src_type
= LCTF_TYPE_TO_INDEX(src_fp
, src_type
);
1454 key
.cltk_fp
= src_fp
;
1455 key
.cltk_idx
= src_type
;
1457 if (target_fp
->ctf_link_type_mapping
)
1458 dst_type
= (uintptr_t) ctf_dynhash_lookup (target_fp
->ctf_link_type_mapping
,
1463 dst_type
= LCTF_INDEX_TO_TYPE (target_fp
, dst_type
,
1464 target_fp
->ctf_parent
!= NULL
);
1465 *dst_fp
= target_fp
;
1469 if (target_fp
->ctf_parent
)
1470 target_fp
= target_fp
->ctf_parent
;
1474 if (target_fp
->ctf_link_type_mapping
)
1475 dst_type
= (uintptr_t) ctf_dynhash_lookup (target_fp
->ctf_link_type_mapping
,
1479 dst_type
= LCTF_INDEX_TO_TYPE (target_fp
, dst_type
,
1480 target_fp
->ctf_parent
!= NULL
);
1482 *dst_fp
= target_fp
;
1486 /* The ctf_add_type routine is used to copy a type from a source CTF dictionary
1487 to a dynamic destination dictionary. This routine operates recursively by
1488 following the source type's links and embedded member types. If the
1489 destination dict already contains a named type which has the same attributes,
1490 then we succeed and return this type but no changes occur. */
1492 ctf_add_type_internal (ctf_dict_t
*dst_fp
, ctf_dict_t
*src_fp
, ctf_id_t src_type
,
1493 ctf_dict_t
*proc_tracking_fp
)
1495 ctf_id_t dst_type
= CTF_ERR
;
1496 uint32_t dst_kind
= CTF_K_UNKNOWN
;
1497 ctf_dict_t
*tmp_fp
= dst_fp
;
1501 uint32_t kind
, forward_kind
, flag
, vlen
;
1503 const ctf_type_t
*src_tp
, *dst_tp
;
1504 ctf_bundle_t src
, dst
;
1505 ctf_encoding_t src_en
, dst_en
;
1506 ctf_arinfo_t src_ar
, dst_ar
;
1510 ctf_id_t orig_src_type
= src_type
;
1512 if (!(dst_fp
->ctf_flags
& LCTF_RDWR
))
1513 return (ctf_set_errno (dst_fp
, ECTF_RDONLY
));
1515 if ((src_tp
= ctf_lookup_by_id (&src_fp
, src_type
)) == NULL
)
1516 return (ctf_set_errno (dst_fp
, ctf_errno (src_fp
)));
1518 if ((ctf_type_resolve (src_fp
, src_type
) == CTF_ERR
)
1519 && (ctf_errno (src_fp
) == ECTF_NONREPRESENTABLE
))
1520 return (ctf_set_errno (dst_fp
, ECTF_NONREPRESENTABLE
));
1522 name
= ctf_strptr (src_fp
, src_tp
->ctt_name
);
1523 kind
= LCTF_INFO_KIND (src_fp
, src_tp
->ctt_info
);
1524 flag
= LCTF_INFO_ISROOT (src_fp
, src_tp
->ctt_info
);
1525 vlen
= LCTF_INFO_VLEN (src_fp
, src_tp
->ctt_info
);
1527 /* If this is a type we are currently in the middle of adding, hand it
1528 straight back. (This lets us handle self-referential structures without
1529 considering forwards and empty structures the same as their completed
1532 tmp
= ctf_type_mapping (src_fp
, src_type
, &tmp_fp
);
1536 if (ctf_dynhash_lookup (proc_tracking_fp
->ctf_add_processing
,
1537 (void *) (uintptr_t) src_type
))
1540 /* If this type has already been added from this dictionary, and is the
1541 same kind and (if a struct or union) has the same number of members,
1542 hand it straight back. */
1544 if (ctf_type_kind_unsliced (tmp_fp
, tmp
) == (int) kind
)
1546 if (kind
== CTF_K_STRUCT
|| kind
== CTF_K_UNION
1547 || kind
== CTF_K_ENUM
)
1549 if ((dst_tp
= ctf_lookup_by_id (&tmp_fp
, dst_type
)) != NULL
)
1550 if (vlen
== LCTF_INFO_VLEN (tmp_fp
, dst_tp
->ctt_info
))
1558 forward_kind
= kind
;
1559 if (kind
== CTF_K_FORWARD
)
1560 forward_kind
= src_tp
->ctt_type
;
1562 /* If the source type has a name and is a root type (visible at the top-level
1563 scope), lookup the name in the destination dictionary and verify that it is
1564 of the same kind before we do anything else. */
1566 if ((flag
& CTF_ADD_ROOT
) && name
[0] != '\0'
1567 && (tmp
= ctf_lookup_by_rawname (dst_fp
, forward_kind
, name
)) != 0)
1570 dst_kind
= ctf_type_kind_unsliced (dst_fp
, dst_type
);
1573 /* If an identically named dst_type exists, fail with ECTF_CONFLICT
1574 unless dst_type is a forward declaration and src_type is a struct,
1575 union, or enum (i.e. the definition of the previous forward decl).
1577 We also allow addition in the opposite order (addition of a forward when a
1578 struct, union, or enum already exists), which is a NOP and returns the
1579 already-present struct, union, or enum. */
1581 if (dst_type
!= CTF_ERR
&& dst_kind
!= kind
)
1583 if (kind
== CTF_K_FORWARD
1584 && (dst_kind
== CTF_K_ENUM
|| dst_kind
== CTF_K_STRUCT
1585 || dst_kind
== CTF_K_UNION
))
1587 ctf_add_type_mapping (src_fp
, src_type
, dst_fp
, dst_type
);
1591 if (dst_kind
!= CTF_K_FORWARD
1592 || (kind
!= CTF_K_ENUM
&& kind
!= CTF_K_STRUCT
1593 && kind
!= CTF_K_UNION
))
1595 ctf_err_warn (dst_fp
, 1, ECTF_CONFLICT
,
1596 _("ctf_add_type: conflict for type %s: "
1597 "kinds differ, new: %i; old (ID %lx): %i"),
1598 name
, kind
, dst_type
, dst_kind
);
1599 return (ctf_set_errno (dst_fp
, ECTF_CONFLICT
));
1603 /* We take special action for an integer, float, or slice since it is
1604 described not only by its name but also its encoding. For integers,
1605 bit-fields exploit this degeneracy. */
1607 if (kind
== CTF_K_INTEGER
|| kind
== CTF_K_FLOAT
|| kind
== CTF_K_SLICE
)
1609 if (ctf_type_encoding (src_fp
, src_type
, &src_en
) != 0)
1610 return (ctf_set_errno (dst_fp
, ctf_errno (src_fp
)));
1612 if (dst_type
!= CTF_ERR
)
1614 ctf_dict_t
*fp
= dst_fp
;
1616 if ((dst_tp
= ctf_lookup_by_id (&fp
, dst_type
)) == NULL
)
1619 if (ctf_type_encoding (dst_fp
, dst_type
, &dst_en
) != 0)
1620 return CTF_ERR
; /* errno set for us. */
1622 if (LCTF_INFO_ISROOT (fp
, dst_tp
->ctt_info
) & CTF_ADD_ROOT
)
1624 /* The type that we found in the hash is also root-visible. If
1625 the two types match then use the existing one; otherwise,
1626 declare a conflict. Note: slices are not certain to match
1627 even if there is no conflict: we must check the contained type
1630 if (memcmp (&src_en
, &dst_en
, sizeof (ctf_encoding_t
)) == 0)
1632 if (kind
!= CTF_K_SLICE
)
1634 ctf_add_type_mapping (src_fp
, src_type
, dst_fp
, dst_type
);
1640 return (ctf_set_errno (dst_fp
, ECTF_CONFLICT
));
1645 /* We found a non-root-visible type in the hash. If its encoding
1646 is the same, we can reuse it, unless it is a slice. */
1648 if (memcmp (&src_en
, &dst_en
, sizeof (ctf_encoding_t
)) == 0)
1650 if (kind
!= CTF_K_SLICE
)
1652 ctf_add_type_mapping (src_fp
, src_type
, dst_fp
, dst_type
);
1660 src
.ctb_dict
= src_fp
;
1661 src
.ctb_type
= src_type
;
1664 dst
.ctb_dict
= dst_fp
;
1665 dst
.ctb_type
= dst_type
;
1668 /* Now perform kind-specific processing. If dst_type is CTF_ERR, then we add
1669 a new type with the same properties as src_type to dst_fp. If dst_type is
1670 not CTF_ERR, then we verify that dst_type has the same attributes as
1671 src_type. We recurse for embedded references. Before we start, we note
1672 that we are processing this type, to prevent infinite recursion: we do not
1673 re-process any type that appears in this list. The list is emptied
1674 wholesale at the end of processing everything in this recursive stack. */
1676 if (ctf_dynhash_insert (proc_tracking_fp
->ctf_add_processing
,
1677 (void *) (uintptr_t) src_type
, (void *) 1) < 0)
1678 return ctf_set_errno (dst_fp
, ENOMEM
);
1683 /* If we found a match we will have either returned it or declared a
1685 dst_type
= ctf_add_integer (dst_fp
, flag
, name
, &src_en
);
1689 /* If we found a match we will have either returned it or declared a
1691 dst_type
= ctf_add_float (dst_fp
, flag
, name
, &src_en
);
1695 /* We have checked for conflicting encodings: now try to add the
1697 src_type
= ctf_type_reference (src_fp
, src_type
);
1698 src_type
= ctf_add_type_internal (dst_fp
, src_fp
, src_type
,
1701 if (src_type
== CTF_ERR
)
1702 return CTF_ERR
; /* errno is set for us. */
1704 dst_type
= ctf_add_slice (dst_fp
, flag
, src_type
, &src_en
);
1708 case CTF_K_VOLATILE
:
1710 case CTF_K_RESTRICT
:
1711 src_type
= ctf_type_reference (src_fp
, src_type
);
1712 src_type
= ctf_add_type_internal (dst_fp
, src_fp
, src_type
,
1715 if (src_type
== CTF_ERR
)
1716 return CTF_ERR
; /* errno is set for us. */
1718 dst_type
= ctf_add_reftype (dst_fp
, flag
, src_type
, kind
);
1722 if (ctf_array_info (src_fp
, src_type
, &src_ar
) != 0)
1723 return (ctf_set_errno (dst_fp
, ctf_errno (src_fp
)));
1725 src_ar
.ctr_contents
=
1726 ctf_add_type_internal (dst_fp
, src_fp
, src_ar
.ctr_contents
,
1728 src_ar
.ctr_index
= ctf_add_type_internal (dst_fp
, src_fp
,
1731 src_ar
.ctr_nelems
= src_ar
.ctr_nelems
;
1733 if (src_ar
.ctr_contents
== CTF_ERR
|| src_ar
.ctr_index
== CTF_ERR
)
1734 return CTF_ERR
; /* errno is set for us. */
1736 if (dst_type
!= CTF_ERR
)
1738 if (ctf_array_info (dst_fp
, dst_type
, &dst_ar
) != 0)
1739 return CTF_ERR
; /* errno is set for us. */
1741 if (memcmp (&src_ar
, &dst_ar
, sizeof (ctf_arinfo_t
)))
1743 ctf_err_warn (dst_fp
, 1, ECTF_CONFLICT
,
1744 _("conflict for type %s against ID %lx: array info "
1745 "differs, old %lx/%lx/%x; new: %lx/%lx/%x"),
1746 name
, dst_type
, src_ar
.ctr_contents
,
1747 src_ar
.ctr_index
, src_ar
.ctr_nelems
,
1748 dst_ar
.ctr_contents
, dst_ar
.ctr_index
,
1750 return (ctf_set_errno (dst_fp
, ECTF_CONFLICT
));
1754 dst_type
= ctf_add_array (dst_fp
, flag
, &src_ar
);
1757 case CTF_K_FUNCTION
:
1758 ctc
.ctc_return
= ctf_add_type_internal (dst_fp
, src_fp
,
1764 if (ctc
.ctc_return
== CTF_ERR
)
1765 return CTF_ERR
; /* errno is set for us. */
1767 dst_type
= ctf_add_function (dst_fp
, flag
, &ctc
, NULL
);
1779 /* Technically to match a struct or union we need to check both
1780 ways (src members vs. dst, dst members vs. src) but we make
1781 this more optimal by only checking src vs. dst and comparing
1782 the total size of the structure (which we must do anyway)
1783 which covers the possibility of dst members not in src.
1784 This optimization can be defeated for unions, but is so
1785 pathological as to render it irrelevant for our purposes. */
1787 if (dst_type
!= CTF_ERR
&& kind
!= CTF_K_FORWARD
1788 && dst_kind
!= CTF_K_FORWARD
)
1790 if (ctf_type_size (src_fp
, src_type
) !=
1791 ctf_type_size (dst_fp
, dst_type
))
1793 ctf_err_warn (dst_fp
, 1, ECTF_CONFLICT
,
1794 _("conflict for type %s against ID %lx: union "
1795 "size differs, old %li, new %li"), name
,
1796 dst_type
, (long) ctf_type_size (src_fp
, src_type
),
1797 (long) ctf_type_size (dst_fp
, dst_type
));
1798 return (ctf_set_errno (dst_fp
, ECTF_CONFLICT
));
1801 if (ctf_member_iter (src_fp
, src_type
, membcmp
, &dst
))
1803 ctf_err_warn (dst_fp
, 1, ECTF_CONFLICT
,
1804 _("conflict for type %s against ID %lx: members "
1805 "differ, see above"), name
, dst_type
);
1806 return (ctf_set_errno (dst_fp
, ECTF_CONFLICT
));
1812 /* Unlike the other cases, copying structs and unions is done
1813 manually so as to avoid repeated lookups in ctf_add_member
1814 and to ensure the exact same member offsets as in src_type. */
1816 dst_type
= ctf_add_generic (dst_fp
, flag
, name
, kind
, 0, &dtd
);
1817 if (dst_type
== CTF_ERR
)
1818 return CTF_ERR
; /* errno is set for us. */
1820 dst
.ctb_type
= dst_type
;
1823 /* Pre-emptively add this struct to the type mapping so that
1824 structures that refer to themselves work. */
1825 ctf_add_type_mapping (src_fp
, src_type
, dst_fp
, dst_type
);
1827 if (ctf_member_iter (src_fp
, src_type
, membadd
, &dst
) != 0)
1828 errs
++; /* Increment errs and fail at bottom of case. */
1830 if ((ssize
= ctf_type_size (src_fp
, src_type
)) < 0)
1831 return CTF_ERR
; /* errno is set for us. */
1833 size
= (size_t) ssize
;
1834 if (size
> CTF_MAX_SIZE
)
1836 dtd
->dtd_data
.ctt_size
= CTF_LSIZE_SENT
;
1837 dtd
->dtd_data
.ctt_lsizehi
= CTF_SIZE_TO_LSIZE_HI (size
);
1838 dtd
->dtd_data
.ctt_lsizelo
= CTF_SIZE_TO_LSIZE_LO (size
);
1841 dtd
->dtd_data
.ctt_size
= (uint32_t) size
;
1843 dtd
->dtd_data
.ctt_info
= CTF_TYPE_INFO (kind
, flag
, vlen
);
1845 /* Make a final pass through the members changing each dmd_type (a
1846 src_fp type) to an equivalent type in dst_fp. We pass through all
1847 members, leaving any that fail set to CTF_ERR, unless they fail
1848 because they are marking a member of type not representable in this
1849 version of CTF, in which case we just want to silently omit them:
1850 no consumer can do anything with them anyway. */
1851 for (dmd
= ctf_list_next (&dtd
->dtd_u
.dtu_members
);
1852 dmd
!= NULL
; dmd
= ctf_list_next (dmd
))
1854 ctf_dict_t
*dst
= dst_fp
;
1857 memb_type
= ctf_type_mapping (src_fp
, dmd
->dmd_type
, &dst
);
1860 if ((dmd
->dmd_type
=
1861 ctf_add_type_internal (dst_fp
, src_fp
, dmd
->dmd_type
,
1862 proc_tracking_fp
)) == CTF_ERR
)
1864 if (ctf_errno (dst_fp
) != ECTF_NONREPRESENTABLE
)
1869 dmd
->dmd_type
= memb_type
;
1873 return CTF_ERR
; /* errno is set for us. */
1878 if (dst_type
!= CTF_ERR
&& kind
!= CTF_K_FORWARD
1879 && dst_kind
!= CTF_K_FORWARD
)
1881 if (ctf_enum_iter (src_fp
, src_type
, enumcmp
, &dst
)
1882 || ctf_enum_iter (dst_fp
, dst_type
, enumcmp
, &src
))
1884 ctf_err_warn (dst_fp
, 1, ECTF_CONFLICT
,
1885 _("conflict for enum %s against ID %lx: members "
1886 "differ, see above"), name
, dst_type
);
1887 return (ctf_set_errno (dst_fp
, ECTF_CONFLICT
));
1892 dst_type
= ctf_add_enum (dst_fp
, flag
, name
);
1893 if ((dst
.ctb_type
= dst_type
) == CTF_ERR
1894 || ctf_enum_iter (src_fp
, src_type
, enumadd
, &dst
))
1895 return CTF_ERR
; /* errno is set for us */
1900 if (dst_type
== CTF_ERR
)
1901 dst_type
= ctf_add_forward (dst_fp
, flag
, name
, forward_kind
);
1905 src_type
= ctf_type_reference (src_fp
, src_type
);
1906 src_type
= ctf_add_type_internal (dst_fp
, src_fp
, src_type
,
1909 if (src_type
== CTF_ERR
)
1910 return CTF_ERR
; /* errno is set for us. */
1912 /* If dst_type is not CTF_ERR at this point, we should check if
1913 ctf_type_reference(dst_fp, dst_type) != src_type and if so fail with
1914 ECTF_CONFLICT. However, this causes problems with bitness typedefs
1915 that vary based on things like if 32-bit then pid_t is int otherwise
1916 long. We therefore omit this check and assume that if the identically
1917 named typedef already exists in dst_fp, it is correct or
1920 if (dst_type
== CTF_ERR
)
1921 dst_type
= ctf_add_typedef (dst_fp
, flag
, name
, src_type
);
1926 return (ctf_set_errno (dst_fp
, ECTF_CORRUPT
));
1929 if (dst_type
!= CTF_ERR
)
1930 ctf_add_type_mapping (src_fp
, orig_src_type
, dst_fp
, dst_type
);
1935 ctf_add_type (ctf_dict_t
*dst_fp
, ctf_dict_t
*src_fp
, ctf_id_t src_type
)
1939 if (!src_fp
->ctf_add_processing
)
1940 src_fp
->ctf_add_processing
= ctf_dynhash_create (ctf_hash_integer
,
1941 ctf_hash_eq_integer
,
1944 /* We store the hash on the source, because it contains only source type IDs:
1945 but callers will invariably expect errors to appear on the dest. */
1946 if (!src_fp
->ctf_add_processing
)
1947 return (ctf_set_errno (dst_fp
, ENOMEM
));
1949 id
= ctf_add_type_internal (dst_fp
, src_fp
, src_type
, src_fp
);
1950 ctf_dynhash_empty (src_fp
->ctf_add_processing
);